diff --git a/_locales/en/messages.json b/_locales/en/messages.json
index 1f2a3c1b..eae2663d 100644
--- a/_locales/en/messages.json
+++ b/_locales/en/messages.json
@@ -1294,5 +1294,38 @@
"writeStyleForURL": {
"message": "this URL",
"description": "Text for link in toolbar pop-up to write a new style for the current URL"
+ },
+ "syncDropboxStyles": {
+ "message": "Dropbox Export"
+ },
+ "retrieveDropboxSync": {
+ "message": "Dropbox Import"
+ },
+ "overwriteFileExport": {
+    "message": "Do you want to overwrite the existing file?"
+ },
+ "exportSavedSuccess": {
+    "message": "File saved successfully"
+ },
+ "noFileToImport": {
+    "message": "To import your styles, you must export them first."
+ },
+ "connectingDropbox": {
+    "message": "Connecting to Dropbox..."
+ },
+ "gettingStyles": {
+ "message": "Getting all styles..."
+ },
+ "zipStyles": {
+ "message": "Zipping styles..."
+ },
+ "unzipStyles": {
+ "message": "Unzipping styles..."
+ },
+ "readingStyles": {
+ "message": "Reading styles..."
+ },
+ "uploadingFile": {
+    "message": "Uploading file..."
}
}
diff --git a/manage.html b/manage.html
index df581f3f..d9299c3a 100644
--- a/manage.html
+++ b/manage.html
@@ -161,10 +161,17 @@
+
+
+
+
+
+
+
@@ -364,8 +371,30 @@
diff --git a/manage/manage.css b/manage/manage.css
index eb037482..a2930c50 100644
--- a/manage/manage.css
+++ b/manage/manage.css
@@ -1047,6 +1047,54 @@ input[id^="manage.newUI"] {
text-overflow: ellipsis;
}
+/* export/import buttons */
+#backup-buttons .dropbtn {
+ padding: 3px 7px;
+ cursor: pointer;
+ text-overflow: inherit;
+}
+
+#backup-buttons .dropbtn span {
+ display: inline-block;
+ margin-right: 7px;
+}
+
+#backup-buttons .dropdown {
+ position: relative;
+ display: inline-block;
+}
+
+#backup-buttons .dropdown-content {
+ display: none;
+ position: absolute;
+ background-color: #f9f9f9;
+ min-width: 160px;
+ box-shadow: 0px 8px 16px 0px rgba(0, 0, 0, 0.2);
+ z-index: 1;
+}
+
+#backup-buttons .dropdown-content a {
+ color: black;
+ padding: 12px 16px;
+ text-decoration: none;
+ display: block;
+}
+
+#backup-buttons .dropdown-content a:hover {
+  background-color: #e9e9e9;
+}
+
+#backup-buttons .dropdown:hover .dropdown-content {
+ display: block;
+}
+
+#backup-buttons .dropdown:hover .dropbtn {
+ background-color: hsl(0, 0%, 95%);
+ border-color: hsl(0, 0%, 52%);
+}
+
/* sort font */
@font-face {
font-family: 'sorticon';
diff --git a/manifest.json b/manifest.json
index 365ec27d..38036031 100644
--- a/manifest.json
+++ b/manifest.json
@@ -19,6 +19,7 @@
"contextMenus",
"storage",
"alarms",
+ "identity",
""
],
"background": {
diff --git a/sync/compress-text.js b/sync/compress-text.js
new file mode 100644
index 00000000..13c2d6c6
--- /dev/null
+++ b/sync/compress-text.js
@@ -0,0 +1,41 @@
+/* global zip */
+/* global onDOMready */
+'use strict';
+
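+// zip.js runs deflate/inflate in web workers; tell it where the bundled worker scripts live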
+onDOMready().then(() => {
+ zip.workerScriptsPath = '/sync/vendor/zipjs/';
+});
+
+/**
+ * @param {String} filename
+ * @param {String} text content of the file as text
+ * @returns {Promise} resolves to a blob object representing the zip file
+ */
+function createZipFileFromText(filename, text) {
+ return new Promise((resolve, reject) => {
+    zip.createWriter(new zip.BlobWriter('application/zip'), writer => {
+      writer.add(filename, new zip.TextReader(text), () => {
+        writer.close(resolve);
+      });
+    }, reject);
+ });
+}
+
+/**
+ * @param {Blob} blob the zip file as a blob
+ * @returns {Promise} resolves to a Blob with the content of the first file in the zip
+ */
+function readZipFileFromBlob(blob) {
+ return new Promise((resolve, reject) => {
+ zip.createReader(new zip.BlobReader(blob), zipReader => {
+ zipReader.getEntries(entries => {
+ entries[0].getData(new zip.BlobWriter('text/plain'), data => {
+ zipReader.close();
+ resolve(data);
+ });
+ });
+ }, reject);
+ });
+}
diff --git a/sync/cross-browser-functions.js b/sync/cross-browser-functions.js
new file mode 100644
index 00000000..e570492a
--- /dev/null
+++ b/sync/cross-browser-functions.js
@@ -0,0 +1,24 @@
+'use strict';
+
+/**
+ * @returns {String} redirect URL to be used in |launchWebAuthFlow|
+ */
+function getRedirectUrlAuthFlow() {
+ const browserApi = typeof browser === 'undefined' ? chrome : browser;
+ return browserApi.identity.getRedirectURL();
+}
+
+/**
+ * @param {Object} details options as defined by the chrome.identity API
+ * @param {string} details.url URL that initiates the auth flow
+ * @param {boolean} details.interactive if true, an auth window is shown to the user
+ * @returns {Promise} resolves to the redirect URL containing the access token
+ */
+function launchWebAuthFlow(details) {
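+  // Firefox's browser.identity.launchWebAuthFlow returns a Promise; Chrome's chrome.identity uses a callback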
+  if (typeof browser === 'undefined') {
+    return new Promise((resolve, reject) => {
+      chrome.identity.launchWebAuthFlow(details, responseUrl =>
+        chrome.runtime.lastError ? reject(chrome.runtime.lastError) : resolve(responseUrl));
+    });
+  }
+ return browser.identity.launchWebAuthFlow(details);
+}
diff --git a/sync/import-export-dropbox.js b/sync/import-export-dropbox.js
new file mode 100644
index 00000000..9da70f19
--- /dev/null
+++ b/sync/import-export-dropbox.js
@@ -0,0 +1,163 @@
+/* global messageBox Dropbox createZipFileFromText readZipFileFromBlob launchWebAuthFlow getRedirectUrlAuthFlow importFromString chromeLocal API $ $create t getOwnTab */
+'use strict';
+
+const DROPBOX_API_KEY = 'zg52vphuapvpng9';
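+// name of the JSON file stored inside the zip, and the name of the zip file as saved in Dropbox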
+const FILENAME_ZIP_FILE = 'stylus.json';
+const DROPBOX_FILE = 'stylus.zip';
+const API_ERROR_STATUS_FILE_NOT_FOUND = 409;
+const HTTP_STATUS_CANCEL = 499;
+
+function messageProgressBar(data) {
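+  // shows a simple modal describing the current step; the messageBox promise resolves when the user closes it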
+ return messageBox({
+    title: data.title,
+ className: 'config-dialog',
+ contents: [
+ $create('p', data.text)
+ ],
+ buttons: [{
+ textContent: t('confirmClose'),
+ dataset: {cmd: 'close'},
+ }],
+ }).then(() => {
+ document.body.style.minWidth = '';
+ document.body.style.minHeight = '';
+ });
+}
+
+function hasDropboxAccessToken() {
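+  // resolves to the stored token, or undefined if the user hasn't authorized Dropbox yet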
+ return chromeLocal.getValue('dropbox_access_token');
+}
+
+function requestDropboxAccessToken() {
+ const client = new Dropbox.Dropbox({clientId: DROPBOX_API_KEY});
+ const authUrl = client.getAuthenticationUrl(getRedirectUrlAuthFlow());
+ return launchWebAuthFlow({url: authUrl, interactive: true})
+ .then(urlReturned => {
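+      // the OAuth implicit grant returns the access token in the URL fragment (#access_token=...)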
+ const params = new URLSearchParams(new URL(urlReturned).hash.replace('#', ''));
+ chromeLocal.setValue('dropbox_access_token', params.get('access_token'));
+ return params.get('access_token');
+ });
+}
+
+function uploadFileDropbox(client, stylesText) {
+ return client.filesUpload({path: '/' + DROPBOX_FILE, contents: stylesText});
+}
+
+$('#sync-dropbox-export').onclick = () => {
+ const title = t('syncDropboxStyles');
+ messageProgressBar({title: title, text: t('connectingDropbox')});
+
+ hasDropboxAccessToken()
+ .then(token => token || requestDropboxAccessToken())
+ .then(token => {
+ const client = new Dropbox.Dropbox({
+ clientId: DROPBOX_API_KEY,
+ accessToken: token
+ });
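+      // probe for an existing backup; filesDownload rejects with status 409 when the file doesn't exist yet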
+ return client.filesDownload({path: '/' + DROPBOX_FILE})
+ .then(() => messageBox.confirm(t('overwriteFileExport')))
+ .then(ok => {
+        // delete the existing file if the user confirmed the overwrite
+ if (!ok) {
+ return Promise.reject({status: HTTP_STATUS_CANCEL});
+ }
+ return client.filesDelete({path: '/' + DROPBOX_FILE});
+ })
+      // file deleted successfully; fetch the styles and build the backup
+ .then(() => {
+ messageProgressBar({title: title, text: t('gettingStyles')});
+ return API.getStyles().then(styles => JSON.stringify(styles, null, '\t'));
+ })
+ // create zip file
+ .then(stylesText => {
+ messageProgressBar({title: title, text: t('zipStyles')});
+ return createZipFileFromText(FILENAME_ZIP_FILE, stylesText);
+ })
+      // upload the zip to Dropbox
+      .then(zippedBlob => {
+        messageProgressBar({title: title, text: t('uploadingFile')});
+        return uploadFileDropbox(client, zippedBlob);
+      })
+      // give feedback to the user
+ .then(() => messageProgressBar({title: title, text: t('exportSavedSuccess')}))
+      // handle the not-found case and user cancellation
+      .catch(error => {
+        // no backup exists yet: this is the first export, so just upload
+        if (error.status === API_ERROR_STATUS_FILE_NOT_FOUND) {
+ API.getStyles()
+ .then(styles => {
+ messageProgressBar({title: title, text: t('gettingStyles')});
+ return JSON.stringify(styles, null, '\t');
+ })
+ .then(stylesText => {
+ messageProgressBar({title: title, text: t('zipStyles')});
+ return createZipFileFromText(FILENAME_ZIP_FILE, stylesText);
+ })
+            .then(zippedBlob => {
+              messageProgressBar({title: title, text: t('uploadingFile')});
+              return uploadFileDropbox(client, zippedBlob);
+ })
+ .then(() => messageProgressBar({title: title, text: t('exportSavedSuccess')}))
+ .catch(err => messageBox.alert(err));
+ return;
+ }
+
+ // user cancelled the flow
+ if (error.status === HTTP_STATUS_CANCEL) {
+ return;
+ }
+
+ console.error(error);
+ });
+ });
+};
+
+$('#sync-dropbox-import').onclick = () => {
+ const title = t('retrieveDropboxSync');
+ messageProgressBar({title: title, text: t('connectingDropbox')});
+
+ hasDropboxAccessToken()
+ .then(token => token || requestDropboxAccessToken())
+ .then(token => {
+ const client = new Dropbox.Dropbox({
+ clientId: DROPBOX_API_KEY,
+ accessToken: token
+ });
+ return client.filesDownload({path: '/' + DROPBOX_FILE})
+ .then(response => {
+ messageProgressBar({title: title, text: t('unzipStyles')});
+ return readZipFileFromBlob(response.fileBlob);
+ })
+        .then(unzippedBlob => {
+ messageProgressBar({title: title, text: t('readingStyles')});
+ document.body.style.cursor = 'wait';
+ const fReader = new FileReader();
+ fReader.onloadend = event => {
+ const text = event.target.result;
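+            // heuristic: a JSON backup starts with "[", a usercss file contains "==UserStyle=="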
+            const maybeUsercss = !/^\s*\[/.test(text) && /==UserStyle==/i.test(text);
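+          // usercss goes through the regular usercss installer, pointed at a temporary blob: URL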
+ (!maybeUsercss ?
+ importFromString(text) :
+ getOwnTab().then(tab => {
+ tab.url = URL.createObjectURL(new Blob([text], {type: 'text/css'}));
+ return API.installUsercss({direct: true, tab})
+ .then(() => URL.revokeObjectURL(tab.url));
+ })
+          ).then(() => {
+            document.body.style.cursor = '';
+          });
+ };
+          fReader.readAsText(unzippedBlob, 'utf-8');
+ })
+ .catch(error => {
+          // no backup file exists in Dropbox
+ if (error.status === API_ERROR_STATUS_FILE_NOT_FOUND) {
+ messageBox.alert(t('noFileToImport'));
+ return;
+ }
+ messageBox.alert(error);
+ });
+ });
+};
diff --git a/sync/vendor/dropbox/LICENSE b/sync/vendor/dropbox/LICENSE
new file mode 100755
index 00000000..c06e3c4a
--- /dev/null
+++ b/sync/vendor/dropbox/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2016 Dropbox Inc., http://www.dropbox.com/
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/sync/vendor/dropbox/dropbox-sdk.js b/sync/vendor/dropbox/dropbox-sdk.js
new file mode 100644
index 00000000..3defc663
--- /dev/null
+++ b/sync/vendor/dropbox/dropbox-sdk.js
@@ -0,0 +1,5190 @@
+(function (global, factory) {
+ typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
+ typeof define === 'function' && define.amd ? define(factory) :
+ (global.Dropbox = factory());
+}(this, (function () { 'use strict';
+
+// Auto-generated by Stone, do not modify.
+var routes = {};
+
+/**
+ * Creates an OAuth 2.0 access token from the supplied OAuth 1.0 access token.
+ * @function Dropbox#authTokenFromOauth1
+ * @arg {AuthTokenFromOAuth1Arg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.authTokenFromOauth1 = function (arg) {
+ return this.request('auth/token/from_oauth1', arg, 'app', 'api', 'rpc');
+};
+
+/**
+ * Disables the access token used to authenticate the call.
+ * @function Dropbox#authTokenRevoke
+ * @arg {void} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.authTokenRevoke = function (arg) {
+ return this.request('auth/token/revoke', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Add property groups to a Dropbox file. See templates/add_for_user or
+ * templates/add_for_team to create new templates.
+ * @function Dropbox#filePropertiesPropertiesAdd
+ * @arg {FilePropertiesAddPropertiesArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filePropertiesPropertiesAdd = function (arg) {
+ return this.request('file_properties/properties/add', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Overwrite property groups associated with a file. This endpoint should be
+ * used instead of properties/update when property groups are being updated via
+ * a "snapshot" instead of via a "delta". In other words, this endpoint will
+ * delete all omitted fields from a property group, whereas properties/update
+ * will only delete fields that are explicitly marked for deletion.
+ * @function Dropbox#filePropertiesPropertiesOverwrite
+ * @arg {FilePropertiesOverwritePropertyGroupArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filePropertiesPropertiesOverwrite = function (arg) {
+ return this.request('file_properties/properties/overwrite', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Permanently removes the specified property group from the file. To remove
+ * specific property field key value pairs, see properties/update. To update a
+ * template, see templates/update_for_user or templates/update_for_team. To
+ * remove a template, see templates/remove_for_user or
+ * templates/remove_for_team.
+ * @function Dropbox#filePropertiesPropertiesRemove
+ * @arg {FilePropertiesRemovePropertiesArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filePropertiesPropertiesRemove = function (arg) {
+ return this.request('file_properties/properties/remove', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Search across property templates for particular property field values.
+ * @function Dropbox#filePropertiesPropertiesSearch
+ * @arg {FilePropertiesPropertiesSearchArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filePropertiesPropertiesSearch = function (arg) {
+ return this.request('file_properties/properties/search', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Once a cursor has been retrieved from properties/search, use this to paginate
+ * through all search results.
+ * @function Dropbox#filePropertiesPropertiesSearchContinue
+ * @arg {FilePropertiesPropertiesSearchContinueArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filePropertiesPropertiesSearchContinue = function (arg) {
+ return this.request('file_properties/properties/search/continue', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Add, update or remove properties associated with the supplied file and
+ * templates. This endpoint should be used instead of properties/overwrite when
+ * property groups are being updated via a "delta" instead of via a "snapshot" .
+ * In other words, this endpoint will not delete any omitted fields from a
+ * property group, whereas properties/overwrite will delete any fields that are
+ * omitted from a property group.
+ * @function Dropbox#filePropertiesPropertiesUpdate
+ * @arg {FilePropertiesUpdatePropertiesArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filePropertiesPropertiesUpdate = function (arg) {
+ return this.request('file_properties/properties/update', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Add a template associated with a team. See properties/add to add properties
+ * to a file or folder. Note: this endpoint will create team-owned templates.
+ * @function Dropbox#filePropertiesTemplatesAddForTeam
+ * @arg {FilePropertiesAddTemplateArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filePropertiesTemplatesAddForTeam = function (arg) {
+ return this.request('file_properties/templates/add_for_team', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Add a template associated with a user. See properties/add to add properties
+ * to a file. This endpoint can't be called on a team member or admin's behalf.
+ * @function Dropbox#filePropertiesTemplatesAddForUser
+ * @arg {FilePropertiesAddTemplateArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filePropertiesTemplatesAddForUser = function (arg) {
+ return this.request('file_properties/templates/add_for_user', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Get the schema for a specified template.
+ * @function Dropbox#filePropertiesTemplatesGetForTeam
+ * @arg {FilePropertiesGetTemplateArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filePropertiesTemplatesGetForTeam = function (arg) {
+ return this.request('file_properties/templates/get_for_team', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Get the schema for a specified template. This endpoint can't be called on a
+ * team member or admin's behalf.
+ * @function Dropbox#filePropertiesTemplatesGetForUser
+ * @arg {FilePropertiesGetTemplateArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filePropertiesTemplatesGetForUser = function (arg) {
+ return this.request('file_properties/templates/get_for_user', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Get the template identifiers for a team. To get the schema of each template
+ * use templates/get_for_team.
+ * @function Dropbox#filePropertiesTemplatesListForTeam
+ * @arg {void} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filePropertiesTemplatesListForTeam = function (arg) {
+ return this.request('file_properties/templates/list_for_team', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Get the template identifiers for a team. To get the schema of each template
+ * use templates/get_for_user. This endpoint can't be called on a team member or
+ * admin's behalf.
+ * @function Dropbox#filePropertiesTemplatesListForUser
+ * @arg {void} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filePropertiesTemplatesListForUser = function (arg) {
+ return this.request('file_properties/templates/list_for_user', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Permanently removes the specified template created from
+ * templates/add_for_user. All properties associated with the template will also
+ * be removed. This action cannot be undone.
+ * @function Dropbox#filePropertiesTemplatesRemoveForTeam
+ * @arg {FilePropertiesRemoveTemplateArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filePropertiesTemplatesRemoveForTeam = function (arg) {
+ return this.request('file_properties/templates/remove_for_team', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Permanently removes the specified template created from
+ * templates/add_for_user. All properties associated with the template will also
+ * be removed. This action cannot be undone.
+ * @function Dropbox#filePropertiesTemplatesRemoveForUser
+ * @arg {FilePropertiesRemoveTemplateArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filePropertiesTemplatesRemoveForUser = function (arg) {
+ return this.request('file_properties/templates/remove_for_user', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Update a template associated with a team. This route can update the template
+ * name, the template description and add optional properties to templates.
+ * @function Dropbox#filePropertiesTemplatesUpdateForTeam
+ * @arg {FilePropertiesUpdateTemplateArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filePropertiesTemplatesUpdateForTeam = function (arg) {
+ return this.request('file_properties/templates/update_for_team', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Update a template associated with a user. This route can update the template
+ * name, the template description and add optional properties to templates. This
+ * endpoint can't be called on a team member or admin's behalf.
+ * @function Dropbox#filePropertiesTemplatesUpdateForUser
+ * @arg {FilePropertiesUpdateTemplateArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filePropertiesTemplatesUpdateForUser = function (arg) {
+ return this.request('file_properties/templates/update_for_user', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Creates a file request for this user.
+ * @function Dropbox#fileRequestsCreate
+ * @arg {FileRequestsCreateFileRequestArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.fileRequestsCreate = function (arg) {
+ return this.request('file_requests/create', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Returns the specified file request.
+ * @function Dropbox#fileRequestsGet
+ * @arg {FileRequestsGetFileRequestArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.fileRequestsGet = function (arg) {
+ return this.request('file_requests/get', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Returns a list of file requests owned by this user. For apps with the app
+ * folder permission, this will only return file requests with destinations in
+ * the app folder.
+ * @function Dropbox#fileRequestsList
+ * @arg {void} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.fileRequestsList = function (arg) {
+ return this.request('file_requests/list', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Update a file request.
+ * @function Dropbox#fileRequestsUpdate
+ * @arg {FileRequestsUpdateFileRequestArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.fileRequestsUpdate = function (arg) {
+ return this.request('file_requests/update', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Returns the metadata for a file or folder. This is an alpha endpoint
+ * compatible with the properties API. Note: Metadata for the root folder is
+ * unsupported.
+ * @function Dropbox#filesAlphaGetMetadata
+ * @deprecated
+ * @arg {FilesAlphaGetMetadataArg} arg - The request parameters.
+ * @returns {Promise.<(FilesFileMetadata|FilesFolderMetadata|FilesDeletedMetadata), Error.>}
+ */
+routes.filesAlphaGetMetadata = function (arg) {
+ return this.request('files/alpha/get_metadata', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Create a new file with the contents provided in the request. Note that this
+ * endpoint is part of the properties API alpha and is slightly different from
+ * upload. Do not use this to upload a file larger than 150 MB. Instead, create
+ * an upload session with upload_session/start.
+ * @function Dropbox#filesAlphaUpload
+ * @deprecated
+ * @arg {FilesCommitInfoWithProperties} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesAlphaUpload = function (arg) {
+ return this.request('files/alpha/upload', arg, 'user', 'content', 'upload');
+};
+
+/**
+ * Copy a file or folder to a different location in the user's Dropbox. If the
+ * source path is a folder all its contents will be copied.
+ * @function Dropbox#filesCopy
+ * @deprecated
+ * @arg {FilesRelocationArg} arg - The request parameters.
+ * @returns {Promise.<(FilesFileMetadata|FilesFolderMetadata|FilesDeletedMetadata), Error.>}
+ */
+routes.filesCopy = function (arg) {
+ return this.request('files/copy', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Copy multiple files or folders to different locations at once in the user's
+ * Dropbox. If RelocationBatchArg.allow_shared_folder is false, this route is
+ * atomic. If one entry fails, the whole transaction will abort. If
+ * RelocationBatchArg.allow_shared_folder is true, atomicity is not guaranteed,
+ * but you will be able to copy the contents of shared folders to new locations.
+ * This route will return job ID immediately and do the async copy job in
+ * background. Please use copy_batch/check to check the job status.
+ * @function Dropbox#filesCopyBatch
+ * @arg {FilesRelocationBatchArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesCopyBatch = function (arg) {
+ return this.request('files/copy_batch', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Returns the status of an asynchronous job for copy_batch. If success, it
+ * returns list of results for each entry.
+ * @function Dropbox#filesCopyBatchCheck
+ * @arg {AsyncPollArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesCopyBatchCheck = function (arg) {
+ return this.request('files/copy_batch/check', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Get a copy reference to a file or folder. This reference string can be used
+ * to save that file or folder to another user's Dropbox by passing it to
+ * copy_reference/save.
+ * @function Dropbox#filesCopyReferenceGet
+ * @arg {FilesGetCopyReferenceArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesCopyReferenceGet = function (arg) {
+ return this.request('files/copy_reference/get', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Save a copy reference returned by copy_reference/get to the user's Dropbox.
+ * @function Dropbox#filesCopyReferenceSave
+ * @arg {FilesSaveCopyReferenceArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesCopyReferenceSave = function (arg) {
+ return this.request('files/copy_reference/save', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Copy a file or folder to a different location in the user's Dropbox. If the
+ * source path is a folder all its contents will be copied.
+ * @function Dropbox#filesCopyV2
+ * @arg {FilesRelocationArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesCopyV2 = function (arg) {
+ return this.request('files/copy_v2', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Create a folder at a given path.
+ * @function Dropbox#filesCreateFolder
+ * @deprecated
+ * @arg {FilesCreateFolderArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesCreateFolder = function (arg) {
+ return this.request('files/create_folder', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Create multiple folders at once. This route is asynchronous for large
+ * batches, which returns a job ID immediately and runs the create folder batch
+ * asynchronously. Otherwise, creates the folders and returns the result
+ * synchronously for smaller inputs. You can force asynchronous behaviour by
+ * using the CreateFolderBatchArg.force_async flag. Use
+ * create_folder_batch/check to check the job status.
+ * @function Dropbox#filesCreateFolderBatch
+ * @arg {FilesCreateFolderBatchArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesCreateFolderBatch = function (arg) {
+ return this.request('files/create_folder_batch', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Returns the status of an asynchronous job for create_folder_batch. If
+ * success, it returns list of result for each entry.
+ * @function Dropbox#filesCreateFolderBatchCheck
+ * @arg {AsyncPollArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesCreateFolderBatchCheck = function (arg) {
+ return this.request('files/create_folder_batch/check', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Create a folder at a given path.
+ * @function Dropbox#filesCreateFolderV2
+ * @arg {FilesCreateFolderArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesCreateFolderV2 = function (arg) {
+ return this.request('files/create_folder_v2', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Delete the file or folder at a given path. If the path is a folder, all its
+ * contents will be deleted too. A successful response indicates that the file
+ * or folder was deleted. The returned metadata will be the corresponding
+ * FileMetadata or FolderMetadata for the item at time of deletion, and not a
+ * DeletedMetadata object.
+ * @function Dropbox#filesDelete
+ * @deprecated
+ * @arg {FilesDeleteArg} arg - The request parameters.
+ * @returns {Promise.<(FilesFileMetadata|FilesFolderMetadata|FilesDeletedMetadata), Error.>}
+ */
+routes.filesDelete = function (arg) {
+ return this.request('files/delete', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Delete multiple files/folders at once. This route is asynchronous, which
+ * returns a job ID immediately and runs the delete batch asynchronously. Use
+ * delete_batch/check to check the job status.
+ * @function Dropbox#filesDeleteBatch
+ * @arg {FilesDeleteBatchArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesDeleteBatch = function (arg) {
+ return this.request('files/delete_batch', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Returns the status of an asynchronous job for delete_batch. If success, it
+ * returns list of result for each entry.
+ * @function Dropbox#filesDeleteBatchCheck
+ * @arg {AsyncPollArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesDeleteBatchCheck = function (arg) {
+ return this.request('files/delete_batch/check', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Delete the file or folder at a given path. If the path is a folder, all its
+ * contents will be deleted too. A successful response indicates that the file
+ * or folder was deleted. The returned metadata will be the corresponding
+ * FileMetadata or FolderMetadata for the item at time of deletion, and not a
+ * DeletedMetadata object.
+ * @function Dropbox#filesDeleteV2
+ * @arg {FilesDeleteArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesDeleteV2 = function (arg) {
+ return this.request('files/delete_v2', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Download a file from a user's Dropbox.
+ * @function Dropbox#filesDownload
+ * @arg {FilesDownloadArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesDownload = function (arg) {
+ return this.request('files/download', arg, 'user', 'content', 'download');
+};
+
+/**
+ * Download a folder from the user's Dropbox, as a zip file. The folder must be
+ * less than 1 GB in size and have fewer than 10,000 total files. The input
+ * cannot be a single file.
+ * @function Dropbox#filesDownloadZip
+ * @arg {FilesDownloadZipArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesDownloadZip = function (arg) {
+ return this.request('files/download_zip', arg, 'user', 'content', 'download');
+};
+
+/**
+ * Returns the metadata for a file or folder. Note: Metadata for the root folder
+ * is unsupported.
+ * @function Dropbox#filesGetMetadata
+ * @arg {FilesGetMetadataArg} arg - The request parameters.
+ * @returns {Promise.<(FilesFileMetadata|FilesFolderMetadata|FilesDeletedMetadata), Error.>}
+ */
+routes.filesGetMetadata = function (arg) {
+ return this.request('files/get_metadata', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Get a preview for a file. Currently, PDF previews are generated for files
+ * with the following extensions: .ai, .doc, .docm, .docx, .eps, .odp, .odt,
+ * .pps, .ppsm, .ppsx, .ppt, .pptm, .pptx, .rtf. HTML previews are generated for
+ * files with the following extensions: .csv, .ods, .xls, .xlsm, .xlsx. Other
+ * formats will return an unsupported extension error.
+ * @function Dropbox#filesGetPreview
+ * @arg {FilesPreviewArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesGetPreview = function (arg) {
+ return this.request('files/get_preview', arg, 'user', 'content', 'download');
+};
+
+/**
+ * Get a temporary link to stream content of a file. This link will expire in
+ * four hours and afterwards you will get 410 Gone. Content-Type of the link is
+ * determined automatically by the file's mime type.
+ * @function Dropbox#filesGetTemporaryLink
+ * @arg {FilesGetTemporaryLinkArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesGetTemporaryLink = function (arg) {
+ return this.request('files/get_temporary_link', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Get a thumbnail for an image. This method currently supports files with the
+ * following file extensions: jpg, jpeg, png, tiff, tif, gif and bmp. Photos
+ * that are larger than 20MB in size won't be converted to a thumbnail.
+ * @function Dropbox#filesGetThumbnail
+ * @arg {FilesThumbnailArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesGetThumbnail = function (arg) {
+ return this.request('files/get_thumbnail', arg, 'user', 'content', 'download');
+};
+
+/**
+ * Get thumbnails for a list of images. We allow up to 25 thumbnails in a single
+ * batch. This method currently supports files with the following file
+ * extensions: jpg, jpeg, png, tiff, tif, gif and bmp. Photos that are larger
+ * than 20MB in size won't be converted to a thumbnail.
+ * @function Dropbox#filesGetThumbnailBatch
+ * @arg {FilesGetThumbnailBatchArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesGetThumbnailBatch = function (arg) {
+ return this.request('files/get_thumbnail_batch', arg, 'user', 'content', 'rpc');
+};
+
+/**
+ * Starts returning the contents of a folder. If the result's
+ * ListFolderResult.has_more field is true, call list_folder/continue with the
+ * returned ListFolderResult.cursor to retrieve more entries. If you're using
+ * ListFolderArg.recursive set to true to keep a local cache of the contents of
+ * a Dropbox account, iterate through each entry in order and process them as
+ * follows to keep your local state in sync: For each FileMetadata, store the
+ * new entry at the given path in your local state. If the required parent
+ * folders don't exist yet, create them. If there's already something else at
+ * the given path, replace it and remove all its children. For each
+ * FolderMetadata, store the new entry at the given path in your local state. If
+ * the required parent folders don't exist yet, create them. If there's already
+ * something else at the given path, replace it but leave the children as they
+ * are. Check the new entry's FolderSharingInfo.read_only and set all its
+ * children's read-only statuses to match. For each DeletedMetadata, if your
+ * local state has something at the given path, remove it and all its children.
+ * If there's nothing at the given path, ignore this entry. Note:
+ * auth.RateLimitError may be returned if multiple list_folder or
+ * list_folder/continue calls with same parameters are made simultaneously by
+ * same API app for same user. If your app implements retry logic, please hold
+ * off the retry until the previous request finishes.
+ * @function Dropbox#filesListFolder
+ * @arg {FilesListFolderArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesListFolder = function (arg) {
+ return this.request('files/list_folder', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Once a cursor has been retrieved from list_folder, use this to paginate
+ * through all files and retrieve updates to the folder, following the same
+ * rules as documented for list_folder.
+ * @function Dropbox#filesListFolderContinue
+ * @arg {FilesListFolderContinueArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesListFolderContinue = function (arg) {
+ return this.request('files/list_folder/continue', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * A way to quickly get a cursor for the folder's state. Unlike list_folder,
+ * list_folder/get_latest_cursor doesn't return any entries. This endpoint is
+ * for app which only needs to know about new files and modifications and
+ * doesn't need to know about files that already exist in Dropbox.
+ * @function Dropbox#filesListFolderGetLatestCursor
+ * @arg {FilesListFolderArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesListFolderGetLatestCursor = function (arg) {
+ return this.request('files/list_folder/get_latest_cursor', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * A longpoll endpoint to wait for changes on an account. In conjunction with
+ * list_folder/continue, this call gives you a low-latency way to monitor an
+ * account for file changes. The connection will block until there are changes
+ * available or a timeout occurs. This endpoint is useful mostly for client-side
+ * apps. If you're looking for server-side notifications, check out our webhooks
+ * documentation https://www.dropbox.com/developers/reference/webhooks.
+ * @function Dropbox#filesListFolderLongpoll
+ * @arg {FilesListFolderLongpollArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesListFolderLongpoll = function (arg) {
+ return this.request('files/list_folder/longpoll', arg, 'noauth', 'notify', 'rpc');
+};
+
+/**
+ * Returns revisions for files based on a file path or a file id. The file path
+ * or file id is identified from the latest file entry at the given file path or
+ * id. This end point allows your app to query either by file path or file id by
+ * setting the mode parameter appropriately. In the ListRevisionsMode.path
+ * (default) mode, all revisions at the same file path as the latest file entry
+ * are returned. If revisions with the same file id are desired, then mode must
+ * be set to ListRevisionsMode.id. The ListRevisionsMode.id mode is useful to
+ * retrieve revisions for a given file across moves or renames.
+ * @function Dropbox#filesListRevisions
+ * @arg {FilesListRevisionsArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesListRevisions = function (arg) {
+ return this.request('files/list_revisions', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Move a file or folder to a different location in the user's Dropbox. If the
+ * source path is a folder all its contents will be moved.
+ * @function Dropbox#filesMove
+ * @deprecated
+ * @arg {FilesRelocationArg} arg - The request parameters.
+ * @returns {Promise.<(FilesFileMetadata|FilesFolderMetadata|FilesDeletedMetadata), Error.>}
+ */
+routes.filesMove = function (arg) {
+ return this.request('files/move', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Move multiple files or folders to different locations at once in the user's
+ * Dropbox. This route is 'all or nothing', which means if one entry fails, the
+ * whole transaction will abort. This route will return job ID immediately and
+ * do the async moving job in background. Please use move_batch/check to check
+ * the job status.
+ * @function Dropbox#filesMoveBatch
+ * @arg {FilesRelocationBatchArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesMoveBatch = function (arg) {
+ return this.request('files/move_batch', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Returns the status of an asynchronous job for move_batch. If success, it
+ * returns list of results for each entry.
+ * @function Dropbox#filesMoveBatchCheck
+ * @arg {AsyncPollArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesMoveBatchCheck = function (arg) {
+ return this.request('files/move_batch/check', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Move a file or folder to a different location in the user's Dropbox. If the
+ * source path is a folder all its contents will be moved.
+ * @function Dropbox#filesMoveV2
+ * @arg {FilesRelocationArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesMoveV2 = function (arg) {
+ return this.request('files/move_v2', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Permanently delete the file or folder at a given path (see
+ * https://www.dropbox.com/en/help/40). Note: This endpoint is only available
+ * for Dropbox Business apps.
+ * @function Dropbox#filesPermanentlyDelete
+ * @arg {FilesDeleteArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesPermanentlyDelete = function (arg) {
+ return this.request('files/permanently_delete', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * @function Dropbox#filesPropertiesAdd
+ * @deprecated
+ * @arg {FilePropertiesAddPropertiesArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesPropertiesAdd = function (arg) {
+ return this.request('files/properties/add', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * @function Dropbox#filesPropertiesOverwrite
+ * @deprecated
+ * @arg {FilePropertiesOverwritePropertyGroupArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesPropertiesOverwrite = function (arg) {
+ return this.request('files/properties/overwrite', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * @function Dropbox#filesPropertiesRemove
+ * @deprecated
+ * @arg {FilePropertiesRemovePropertiesArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesPropertiesRemove = function (arg) {
+ return this.request('files/properties/remove', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * @function Dropbox#filesPropertiesTemplateGet
+ * @deprecated
+ * @arg {FilePropertiesGetTemplateArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesPropertiesTemplateGet = function (arg) {
+ return this.request('files/properties/template/get', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * @function Dropbox#filesPropertiesTemplateList
+ * @deprecated
+ * @arg {void} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesPropertiesTemplateList = function (arg) {
+ return this.request('files/properties/template/list', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * @function Dropbox#filesPropertiesUpdate
+ * @deprecated
+ * @arg {FilePropertiesUpdatePropertiesArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesPropertiesUpdate = function (arg) {
+ return this.request('files/properties/update', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Restore a file to a specific revision.
+ * @function Dropbox#filesRestore
+ * @arg {FilesRestoreArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesRestore = function (arg) {
+ return this.request('files/restore', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Save a specified URL into a file in user's Dropbox. If the given path already
+ * exists, the file will be renamed to avoid the conflict (e.g. myfile (1).txt).
+ * @function Dropbox#filesSaveUrl
+ * @arg {FilesSaveUrlArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesSaveUrl = function (arg) {
+ return this.request('files/save_url', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Check the status of a save_url job.
+ * @function Dropbox#filesSaveUrlCheckJobStatus
+ * @arg {AsyncPollArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesSaveUrlCheckJobStatus = function (arg) {
+ return this.request('files/save_url/check_job_status', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Searches for files and folders. Note: Recent changes may not immediately be
+ * reflected in search results due to a short delay in indexing.
+ * @function Dropbox#filesSearch
+ * @arg {FilesSearchArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesSearch = function (arg) {
+ return this.request('files/search', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Create a new file with the contents provided in the request. Do not use this
+ * to upload a file larger than 150 MB. Instead, create an upload session with
+ * upload_session/start.
+ * @function Dropbox#filesUpload
+ * @arg {FilesCommitInfo} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesUpload = function (arg) {
+ return this.request('files/upload', arg, 'user', 'content', 'upload');
+};
+
+/**
+ * Append more data to an upload session. A single request should not upload
+ * more than 150 MB. The maximum size of a file one can upload to an upload
+ * session is 350 GB.
+ * @function Dropbox#filesUploadSessionAppend
+ * @deprecated
+ * @arg {FilesUploadSessionCursor} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesUploadSessionAppend = function (arg) {
+ return this.request('files/upload_session/append', arg, 'user', 'content', 'upload');
+};
+
+/**
+ * Append more data to an upload session. When the parameter close is set, this
+ * call will close the session. A single request should not upload more than 150
+ * MB. The maximum size of a file one can upload to an upload session is 350 GB.
+ * @function Dropbox#filesUploadSessionAppendV2
+ * @arg {FilesUploadSessionAppendArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesUploadSessionAppendV2 = function (arg) {
+ return this.request('files/upload_session/append_v2', arg, 'user', 'content', 'upload');
+};
+
+/**
+ * Finish an upload session and save the uploaded data to the given file path. A
+ * single request should not upload more than 150 MB. The maximum size of a file
+ * one can upload to an upload session is 350 GB.
+ * @function Dropbox#filesUploadSessionFinish
+ * @arg {FilesUploadSessionFinishArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesUploadSessionFinish = function (arg) {
+ return this.request('files/upload_session/finish', arg, 'user', 'content', 'upload');
+};
+
+/**
+ * This route helps you commit many files at once into a user's Dropbox. Use
+ * upload_session/start and upload_session/append_v2 to upload file contents. We
+ * recommend uploading many files in parallel to increase throughput. Once the
+ * file contents have been uploaded, rather than calling upload_session/finish,
+ * use this route to finish all your upload sessions in a single request.
+ * UploadSessionStartArg.close or UploadSessionAppendArg.close needs to be true
+ * for the last upload_session/start or upload_session/append_v2 call. The
+ * maximum size of a file one can upload to an upload session is 350 GB. This
+ * route will return a job_id immediately and do the async commit job in
+ * background. Use upload_session/finish_batch/check to check the job status.
+ * For the same account, this route should be executed serially. That means you
+ * should not start the next job before current job finishes. We allow up to
+ * 1000 entries in a single request.
+ * @function Dropbox#filesUploadSessionFinishBatch
+ * @arg {FilesUploadSessionFinishBatchArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesUploadSessionFinishBatch = function (arg) {
+ return this.request('files/upload_session/finish_batch', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Returns the status of an asynchronous job for upload_session/finish_batch. If
+ * success, it returns list of result for each entry.
+ * @function Dropbox#filesUploadSessionFinishBatchCheck
+ * @arg {AsyncPollArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesUploadSessionFinishBatchCheck = function (arg) {
+ return this.request('files/upload_session/finish_batch/check', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Upload sessions allow you to upload a single file in one or more requests,
+ * for example where the size of the file is greater than 150 MB. This call
+ * starts a new upload session with the given data. You can then use
+ * upload_session/append_v2 to add more data and upload_session/finish to save
+ * all the data to a file in Dropbox. A single request should not upload more
+ * than 150 MB. The maximum size of a file one can upload to an upload session
+ * is 350 GB. An upload session can be used for a maximum of 48 hours.
+ * Attempting to use an UploadSessionStartResult.session_id with
+ * upload_session/append_v2 or upload_session/finish more than 48 hours after
+ * its creation will return a UploadSessionLookupError.not_found.
+ * @function Dropbox#filesUploadSessionStart
+ * @arg {FilesUploadSessionStartArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.filesUploadSessionStart = function (arg) {
+ return this.request('files/upload_session/start', arg, 'user', 'content', 'upload');
+};
+
+/**
+ * Marks the given Paper doc as archived. Note: This action can be performed or
+ * undone by anyone with edit permissions to the doc.
+ * @function Dropbox#paperDocsArchive
+ * @arg {PaperRefPaperDoc} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.paperDocsArchive = function (arg) {
+ return this.request('paper/docs/archive', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Creates a new Paper doc with the provided content.
+ * @function Dropbox#paperDocsCreate
+ * @arg {PaperPaperDocCreateArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.paperDocsCreate = function (arg) {
+ return this.request('paper/docs/create', arg, 'user', 'api', 'upload');
+};
+
+/**
+ * Exports and downloads Paper doc either as HTML or markdown.
+ * @function Dropbox#paperDocsDownload
+ * @arg {PaperPaperDocExport} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.paperDocsDownload = function (arg) {
+ return this.request('paper/docs/download', arg, 'user', 'api', 'download');
+};
+
+/**
+ * Lists the users who are explicitly invited to the Paper folder in which the
+ * Paper doc is contained. For private folders all users (including owner)
+ * shared on the folder are listed and for team folders all non-team users
+ * shared on the folder are returned.
+ * @function Dropbox#paperDocsFolderUsersList
+ * @arg {PaperListUsersOnFolderArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.paperDocsFolderUsersList = function (arg) {
+ return this.request('paper/docs/folder_users/list', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Once a cursor has been retrieved from docs/folder_users/list, use this to
+ * paginate through all users on the Paper folder.
+ * @function Dropbox#paperDocsFolderUsersListContinue
+ * @arg {PaperListUsersOnFolderContinueArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.paperDocsFolderUsersListContinue = function (arg) {
+ return this.request('paper/docs/folder_users/list/continue', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Retrieves folder information for the given Paper doc. This includes: -
+ * folder sharing policy; permissions for subfolders are set by the top-level
+ * folder. - full 'filepath', i.e. the list of folders (both folderId and
+ * folderName) from the root folder to the folder directly containing the
+ * Paper doc. Note: If the Paper doc is not in any folder (aka unfiled) the
+ * response will be empty.
+ * @function Dropbox#paperDocsGetFolderInfo
+ * @arg {PaperRefPaperDoc} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.paperDocsGetFolderInfo = function (arg) {
+ return this.request('paper/docs/get_folder_info', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Return the list of all Paper docs according to the argument specifications.
+ * To iterate over through the full pagination, pass the cursor to
+ * docs/list/continue.
+ * @function Dropbox#paperDocsList
+ * @arg {PaperListPaperDocsArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.paperDocsList = function (arg) {
+ return this.request('paper/docs/list', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Once a cursor has been retrieved from docs/list, use this to paginate through
+ * all Paper doc.
+ * @function Dropbox#paperDocsListContinue
+ * @arg {PaperListPaperDocsContinueArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.paperDocsListContinue = function (arg) {
+ return this.request('paper/docs/list/continue', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Permanently deletes the given Paper doc. This operation is final as the doc
+ * cannot be recovered. Note: This action can be performed only by the doc
+ * owner.
+ * @function Dropbox#paperDocsPermanentlyDelete
+ * @arg {PaperRefPaperDoc} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.paperDocsPermanentlyDelete = function (arg) {
+ return this.request('paper/docs/permanently_delete', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Gets the default sharing policy for the given Paper doc.
+ * @function Dropbox#paperDocsSharingPolicyGet
+ * @arg {PaperRefPaperDoc} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.paperDocsSharingPolicyGet = function (arg) {
+ return this.request('paper/docs/sharing_policy/get', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Sets the default sharing policy for the given Paper doc. The default
+ * 'team_sharing_policy' can be changed only by teams, omit this field for
+ * personal accounts. Note: 'public_sharing_policy' cannot be set to the value
+ * 'disabled' because this setting can be changed only via the team admin
+ * console.
+ * @function Dropbox#paperDocsSharingPolicySet
+ * @arg {PaperPaperDocSharingPolicy} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.paperDocsSharingPolicySet = function (arg) {
+ return this.request('paper/docs/sharing_policy/set', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Updates an existing Paper doc with the provided content.
+ * @function Dropbox#paperDocsUpdate
+ * @arg {PaperPaperDocUpdateArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.paperDocsUpdate = function (arg) {
+ return this.request('paper/docs/update', arg, 'user', 'api', 'upload');
+};
+
+/**
+ * Allows an owner or editor to add users to a Paper doc or change their
+ * permissions using their email address or Dropbox account ID. Note: The Doc
+ * owner's permissions cannot be changed.
+ * @function Dropbox#paperDocsUsersAdd
+ * @arg {PaperAddPaperDocUser} arg - The request parameters.
+ * @returns {Promise., Error.>}
+ */
+routes.paperDocsUsersAdd = function (arg) {
+ return this.request('paper/docs/users/add', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Lists all users who visited the Paper doc or users with explicit access. This
+ * call excludes users who have been removed. The list is sorted by the date of
+ * the visit or the share date. The list will include both users, the explicitly
+ * shared ones as well as those who came in using the Paper url link.
+ * @function Dropbox#paperDocsUsersList
+ * @arg {PaperListUsersOnPaperDocArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.paperDocsUsersList = function (arg) {
+ return this.request('paper/docs/users/list', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Once a cursor has been retrieved from docs/users/list, use this to paginate
+ * through all users on the Paper doc.
+ * @function Dropbox#paperDocsUsersListContinue
+ * @arg {PaperListUsersOnPaperDocContinueArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.paperDocsUsersListContinue = function (arg) {
+ return this.request('paper/docs/users/list/continue', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Allows an owner or editor to remove users from a Paper doc using their email
+ * address or Dropbox account ID. Note: Doc owner cannot be removed.
+ * @function Dropbox#paperDocsUsersRemove
+ * @arg {PaperRemovePaperDocUser} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.paperDocsUsersRemove = function (arg) {
+ return this.request('paper/docs/users/remove', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Adds specified members to a file.
+ * @function Dropbox#sharingAddFileMember
+ * @arg {SharingAddFileMemberArgs} arg - The request parameters.
+ * @returns {Promise., Error.>}
+ */
+routes.sharingAddFileMember = function (arg) {
+ return this.request('sharing/add_file_member', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Allows an owner or editor (if the ACL update policy allows) of a shared
+ * folder to add another member. For the new member to get access to all the
+ * functionality for this folder, you will need to call mount_folder on their
+ * behalf. Apps must have full Dropbox access to use this endpoint.
+ * @function Dropbox#sharingAddFolderMember
+ * @arg {SharingAddFolderMemberArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingAddFolderMember = function (arg) {
+ return this.request('sharing/add_folder_member', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Identical to update_file_member but with less information returned.
+ * @function Dropbox#sharingChangeFileMemberAccess
+ * @deprecated
+ * @arg {SharingChangeFileMemberAccessArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingChangeFileMemberAccess = function (arg) {
+ return this.request('sharing/change_file_member_access', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Returns the status of an asynchronous job. Apps must have full Dropbox access
+ * to use this endpoint.
+ * @function Dropbox#sharingCheckJobStatus
+ * @arg {AsyncPollArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingCheckJobStatus = function (arg) {
+ return this.request('sharing/check_job_status', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Returns the status of an asynchronous job for sharing a folder. Apps must
+ * have full Dropbox access to use this endpoint.
+ * @function Dropbox#sharingCheckRemoveMemberJobStatus
+ * @arg {AsyncPollArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingCheckRemoveMemberJobStatus = function (arg) {
+ return this.request('sharing/check_remove_member_job_status', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Returns the status of an asynchronous job for sharing a folder. Apps must
+ * have full Dropbox access to use this endpoint.
+ * @function Dropbox#sharingCheckShareJobStatus
+ * @arg {AsyncPollArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingCheckShareJobStatus = function (arg) {
+ return this.request('sharing/check_share_job_status', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Create a shared link. If a shared link already exists for the given path,
+ * that link is returned. Note that in the returned PathLinkMetadata, the
+ * PathLinkMetadata.url field is the shortened URL if
+ * CreateSharedLinkArg.short_url argument is set to true. Previously, it was
+ * technically possible to break a shared link by moving or renaming the
+ * corresponding file or folder. In the future, this will no longer be the case,
+ * so your app shouldn't rely on this behavior. Instead, if your app needs to
+ * revoke a shared link, use revoke_shared_link.
+ * @function Dropbox#sharingCreateSharedLink
+ * @deprecated
+ * @arg {SharingCreateSharedLinkArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingCreateSharedLink = function (arg) {
+ return this.request('sharing/create_shared_link', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Create a shared link with custom settings. If no settings are given then the
+ * default visibility is RequestedVisibility.public (The resolved visibility,
+ * though, may depend on other aspects such as team and shared folder settings).
+ * @function Dropbox#sharingCreateSharedLinkWithSettings
+ * @arg {SharingCreateSharedLinkWithSettingsArg} arg - The request parameters.
+ * @returns {Promise.<(SharingFileLinkMetadata|SharingFolderLinkMetadata|SharingSharedLinkMetadata), Error.>}
+ */
+routes.sharingCreateSharedLinkWithSettings = function (arg) {
+ return this.request('sharing/create_shared_link_with_settings', arg, 'user', 'api', 'rpc');
+};
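+
+// Illustrative sketch, not generated SDK code: assuming a `dbx` Dropbox
+// instance, the resolved visibility may differ from the requested one when a
+// team or shared-folder policy overrides it, e.g.
+//   dbx.sharingCreateSharedLinkWithSettings({
+//     path: '/styles.zip',
+//     settings: {requested_visibility: 'public'}
+//   }).then(function (meta) {
+//     console.log(meta.url, meta.link_permissions.resolved_visibility['.tag']);
+//   });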
+
+/**
+ * Returns shared file metadata.
+ * @function Dropbox#sharingGetFileMetadata
+ * @arg {SharingGetFileMetadataArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingGetFileMetadata = function (arg) {
+ return this.request('sharing/get_file_metadata', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Returns shared file metadata.
+ * @function Dropbox#sharingGetFileMetadataBatch
+ * @arg {SharingGetFileMetadataBatchArg} arg - The request parameters.
+ * @returns {Promise., Error.>}
+ */
+routes.sharingGetFileMetadataBatch = function (arg) {
+ return this.request('sharing/get_file_metadata/batch', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Returns shared folder metadata by its folder ID. Apps must have full Dropbox
+ * access to use this endpoint.
+ * @function Dropbox#sharingGetFolderMetadata
+ * @arg {SharingGetMetadataArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingGetFolderMetadata = function (arg) {
+ return this.request('sharing/get_folder_metadata', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Download the shared link's file from a user's Dropbox.
+ * @function Dropbox#sharingGetSharedLinkFile
+ * @arg {Object} arg - The request parameters.
+ * @returns {Promise.<(SharingFileLinkMetadata|SharingFolderLinkMetadata|SharingSharedLinkMetadata), Error.>}
+ */
+routes.sharingGetSharedLinkFile = function (arg) {
+ return this.request('sharing/get_shared_link_file', arg, 'user', 'content', 'download');
+};
+
+/**
+ * Get the shared link's metadata.
+ * @function Dropbox#sharingGetSharedLinkMetadata
+ * @arg {SharingGetSharedLinkMetadataArg} arg - The request parameters.
+ * @returns {Promise.<(SharingFileLinkMetadata|SharingFolderLinkMetadata|SharingSharedLinkMetadata), Error.>}
+ */
+routes.sharingGetSharedLinkMetadata = function (arg) {
+ return this.request('sharing/get_shared_link_metadata', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Returns a list of LinkMetadata objects for this user, including collection
+ * links. If no path is given, returns a list of all shared links for the
+ * current user, including collection links, up to a maximum of 1000 links. If a
+ * non-empty path is given, returns a list of all shared links that allow access
+ * to the given path. Collection links are never returned in this case. Note
+ * that the url field in the response is never the shortened URL.
+ * @function Dropbox#sharingGetSharedLinks
+ * @deprecated
+ * @arg {SharingGetSharedLinksArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingGetSharedLinks = function (arg) {
+ return this.request('sharing/get_shared_links', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Use to obtain the members who have been invited to a file, both inherited and
+ * uninherited members.
+ * @function Dropbox#sharingListFileMembers
+ * @arg {SharingListFileMembersArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingListFileMembers = function (arg) {
+ return this.request('sharing/list_file_members', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Get members of multiple files at once. The arguments to this route are more
+ * limited, and the limit on query result size per file is more strict. To
+ * customize the results more, use the individual file endpoint. Inherited users
+ * and groups are not included in the result, and permissions are not returned
+ * for this endpoint.
+ * @function Dropbox#sharingListFileMembersBatch
+ * @arg {SharingListFileMembersBatchArg} arg - The request parameters.
+ * @returns {Promise., Error.>}
+ */
+routes.sharingListFileMembersBatch = function (arg) {
+ return this.request('sharing/list_file_members/batch', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Once a cursor has been retrieved from list_file_members or
+ * list_file_members/batch, use this to paginate through all shared file
+ * members.
+ * @function Dropbox#sharingListFileMembersContinue
+ * @arg {SharingListFileMembersContinueArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingListFileMembersContinue = function (arg) {
+ return this.request('sharing/list_file_members/continue', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Returns shared folder membership by its folder ID. Apps must have full
+ * Dropbox access to use this endpoint.
+ * @function Dropbox#sharingListFolderMembers
+ * @arg {SharingListFolderMembersArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingListFolderMembers = function (arg) {
+ return this.request('sharing/list_folder_members', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Once a cursor has been retrieved from list_folder_members, use this to
+ * paginate through all shared folder members. Apps must have full Dropbox
+ * access to use this endpoint.
+ * @function Dropbox#sharingListFolderMembersContinue
+ * @arg {SharingListFolderMembersContinueArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingListFolderMembersContinue = function (arg) {
+ return this.request('sharing/list_folder_members/continue', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Return the list of all shared folders the current user has access to. Apps
+ * must have full Dropbox access to use this endpoint.
+ * @function Dropbox#sharingListFolders
+ * @arg {SharingListFoldersArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingListFolders = function (arg) {
+ return this.request('sharing/list_folders', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Once a cursor has been retrieved from list_folders, use this to paginate
+ * through all shared folders. The cursor must come from a previous call to
+ * list_folders or list_folders/continue. Apps must have full Dropbox access to
+ * use this endpoint.
+ * @function Dropbox#sharingListFoldersContinue
+ * @arg {SharingListFoldersContinueArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingListFoldersContinue = function (arg) {
+ return this.request('sharing/list_folders/continue', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Return the list of all shared folders the current user can mount or unmount.
+ * Apps must have full Dropbox access to use this endpoint.
+ * @function Dropbox#sharingListMountableFolders
+ * @arg {SharingListFoldersArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingListMountableFolders = function (arg) {
+ return this.request('sharing/list_mountable_folders', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Once a cursor has been retrieved from list_mountable_folders, use this to
+ * paginate through all mountable shared folders. The cursor must come from a
+ * previous call to list_mountable_folders or list_mountable_folders/continue.
+ * Apps must have full Dropbox access to use this endpoint.
+ * @function Dropbox#sharingListMountableFoldersContinue
+ * @arg {SharingListFoldersContinueArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingListMountableFoldersContinue = function (arg) {
+ return this.request('sharing/list_mountable_folders/continue', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Returns a list of all files shared with current user. Does not include files
+ * the user has received via shared folders, and does not include unclaimed
+ * invitations.
+ * @function Dropbox#sharingListReceivedFiles
+ * @arg {SharingListFilesArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingListReceivedFiles = function (arg) {
+ return this.request('sharing/list_received_files', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Get more results with a cursor from list_received_files.
+ * @function Dropbox#sharingListReceivedFilesContinue
+ * @arg {SharingListFilesContinueArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingListReceivedFilesContinue = function (arg) {
+ return this.request('sharing/list_received_files/continue', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * List shared links of this user. If no path is given, returns a list of all
+ * shared links for the current user. If a non-empty path is given, returns a
+ * list of all shared links that allow access to the given path - direct links
+ * to the given path and links to parent folders of the given path. Links to
+ * parent folders can be suppressed by setting direct_only to true.
+ * @function Dropbox#sharingListSharedLinks
+ * @arg {SharingListSharedLinksArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingListSharedLinks = function (arg) {
+ return this.request('sharing/list_shared_links', arg, 'user', 'api', 'rpc');
+};
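+
+// Illustrative sketch, assuming a `dbx` Dropbox instance: direct_only hides
+// links that only grant access through a parent folder, e.g.
+//   dbx.sharingListSharedLinks({path: '/styles.zip', direct_only: true})
+//     .then(function (res) { return res.links; });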
+
+/**
+ * Modify the shared link's settings. If the requested visibility conflicts with
+ * the shared links policy of the team or the shared folder (in case the linked
+ * file is part of a shared folder) then the LinkPermissions.resolved_visibility
+ * of the returned SharedLinkMetadata will reflect the actual visibility of the
+ * shared link and the LinkPermissions.requested_visibility will reflect the
+ * requested visibility.
+ * @function Dropbox#sharingModifySharedLinkSettings
+ * @arg {SharingModifySharedLinkSettingsArgs} arg - The request parameters.
+ * @returns {Promise.<(SharingFileLinkMetadata|SharingFolderLinkMetadata|SharingSharedLinkMetadata), Error.>}
+ */
+routes.sharingModifySharedLinkSettings = function (arg) {
+ return this.request('sharing/modify_shared_link_settings', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * The current user mounts the designated folder. Mount a shared folder for a
+ * user after they have been added as a member. Once mounted, the shared folder
+ * will appear in their Dropbox. Apps must have full Dropbox access to use this
+ * endpoint.
+ * @function Dropbox#sharingMountFolder
+ * @arg {SharingMountFolderArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingMountFolder = function (arg) {
+ return this.request('sharing/mount_folder', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * The current user relinquishes their membership in the designated file. Note
+ * that the current user may still have inherited access to this file through
+ * the parent folder. Apps must have full Dropbox access to use this endpoint.
+ * @function Dropbox#sharingRelinquishFileMembership
+ * @arg {SharingRelinquishFileMembershipArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingRelinquishFileMembership = function (arg) {
+ return this.request('sharing/relinquish_file_membership', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * The current user relinquishes their membership in the designated shared
+ * folder and will no longer have access to the folder. A folder owner cannot
+ * relinquish membership in their own folder. This will run synchronously if
+ * leave_a_copy is false, and asynchronously if leave_a_copy is true. Apps must
+ * have full Dropbox access to use this endpoint.
+ * @function Dropbox#sharingRelinquishFolderMembership
+ * @arg {SharingRelinquishFolderMembershipArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingRelinquishFolderMembership = function (arg) {
+ return this.request('sharing/relinquish_folder_membership', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Identical to remove_file_member_2 but with less information returned.
+ * @function Dropbox#sharingRemoveFileMember
+ * @deprecated
+ * @arg {SharingRemoveFileMemberArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingRemoveFileMember = function (arg) {
+ return this.request('sharing/remove_file_member', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Removes a specified member from the file.
+ * @function Dropbox#sharingRemoveFileMember2
+ * @arg {SharingRemoveFileMemberArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingRemoveFileMember2 = function (arg) {
+ return this.request('sharing/remove_file_member_2', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Allows an owner or editor (if the ACL update policy allows) of a shared
+ * folder to remove another member. Apps must have full Dropbox access to use
+ * this endpoint.
+ * @function Dropbox#sharingRemoveFolderMember
+ * @arg {SharingRemoveFolderMemberArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingRemoveFolderMember = function (arg) {
+ return this.request('sharing/remove_folder_member', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Revoke a shared link. Note that even after revoking a shared link to a file,
+ * the file may be accessible if there are shared links leading to any of the
+ * file parent folders. To list all shared links that enable access to a
+ * specific file, you can use the list_shared_links with the file as the
+ * ListSharedLinksArg.path argument.
+ * @function Dropbox#sharingRevokeSharedLink
+ * @arg {SharingRevokeSharedLinkArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingRevokeSharedLink = function (arg) {
+ return this.request('sharing/revoke_shared_link', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Change the inheritance policy of an existing Shared Folder. Only permitted
+ * for shared folders in a shared team root. If a ShareFolderLaunch.async_job_id
+ * is returned, you'll need to call check_share_job_status until the action
+ * completes to get the metadata for the folder.
+ * @function Dropbox#sharingSetAccessInheritance
+ * @arg {SharingSetAccessInheritanceArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingSetAccessInheritance = function (arg) {
+ return this.request('sharing/set_access_inheritance', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Share a folder with collaborators. Most sharing will be completed
+ * synchronously. Large folders will be completed asynchronously. To make
+ * testing the async case repeatable, set `ShareFolderArg.force_async`. If a
+ * ShareFolderLaunch.async_job_id is returned, you'll need to call
+ * check_share_job_status until the action completes to get the metadata for the
+ * folder. Apps must have full Dropbox access to use this endpoint.
+ * @function Dropbox#sharingShareFolder
+ * @arg {SharingShareFolderArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingShareFolder = function (arg) {
+ return this.request('sharing/share_folder', arg, 'user', 'api', 'rpc');
+};
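+
+// Illustrative sketch, assuming a `dbx` Dropbox instance: large folders return
+// an async job id that has to be polled via check_share_job_status, e.g.
+//   dbx.sharingShareFolder({path: '/shared'}).then(function (launch) {
+//     return launch['.tag'] === 'async_job_id'
+//       ? dbx.sharingCheckShareJobStatus({async_job_id: launch.async_job_id})
+//       : launch;
+//   });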
+
+/**
+ * Transfer ownership of a shared folder to a member of the shared folder. User
+ * must have AccessLevel.owner access to the shared folder to perform a
+ * transfer. Apps must have full Dropbox access to use this endpoint.
+ * @function Dropbox#sharingTransferFolder
+ * @arg {SharingTransferFolderArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingTransferFolder = function (arg) {
+ return this.request('sharing/transfer_folder', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * The current user unmounts the designated folder. They can re-mount the folder
+ * at a later time using mount_folder. Apps must have full Dropbox access to use
+ * this endpoint.
+ * @function Dropbox#sharingUnmountFolder
+ * @arg {SharingUnmountFolderArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingUnmountFolder = function (arg) {
+ return this.request('sharing/unmount_folder', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Remove all members from this file. Does not remove inherited members.
+ * @function Dropbox#sharingUnshareFile
+ * @arg {SharingUnshareFileArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingUnshareFile = function (arg) {
+ return this.request('sharing/unshare_file', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Allows a shared folder owner to unshare the folder. You'll need to call
+ * check_job_status to determine if the action has completed successfully. Apps
+ * must have full Dropbox access to use this endpoint.
+ * @function Dropbox#sharingUnshareFolder
+ * @arg {SharingUnshareFolderArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingUnshareFolder = function (arg) {
+ return this.request('sharing/unshare_folder', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Changes a member's access on a shared file.
+ * @function Dropbox#sharingUpdateFileMember
+ * @arg {SharingUpdateFileMemberArgs} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingUpdateFileMember = function (arg) {
+ return this.request('sharing/update_file_member', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Allows an owner or editor of a shared folder to update another member's
+ * permissions. Apps must have full Dropbox access to use this endpoint.
+ * @function Dropbox#sharingUpdateFolderMember
+ * @arg {SharingUpdateFolderMemberArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingUpdateFolderMember = function (arg) {
+ return this.request('sharing/update_folder_member', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Update the sharing policies for a shared folder. User must have
+ * AccessLevel.owner access to the shared folder to update its policies. Apps
+ * must have full Dropbox access to use this endpoint.
+ * @function Dropbox#sharingUpdateFolderPolicy
+ * @arg {SharingUpdateFolderPolicyArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.sharingUpdateFolderPolicy = function (arg) {
+ return this.request('sharing/update_folder_policy', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Retrieves team events. Events have a lifespan of two years. Events older than
+ * two years will not be returned. Many attributes note 'may be missing due to
+ * historical data gap'. Note that the file_operations category and analogous
+ * paper events are not available on all Dropbox Business plans (see
+ * /business/plans-comparison). Use features/get_values
+ * (/developers/documentation/http/teams#team-features-get_values) to check for
+ * this feature. Permission: Team Auditing.
+ * @function Dropbox#teamLogGetEvents
+ * @arg {TeamLogGetTeamEventsArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.teamLogGetEvents = function (arg) {
+ return this.request('team_log/get_events', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Once a cursor has been retrieved from get_events, use this to paginate
+ * through all events. Permission : Team Auditing.
+ * @function Dropbox#teamLogGetEventsContinue
+ * @arg {TeamLogGetTeamEventsContinueArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.teamLogGetEventsContinue = function (arg) {
+ return this.request('team_log/get_events/continue', arg, 'team', 'api', 'rpc');
+};
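+
+// Illustrative sketch, assuming a team-authenticated `dbx` instance: page
+// through the audit log with the returned cursor, e.g.
+//   dbx.teamLogGetEvents({limit: 100}).then(function (res) {
+//     return res.has_more
+//       ? dbx.teamLogGetEventsContinue({cursor: res.cursor})
+//       : res;
+//   });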
+
+/**
+ * Get information about a user's account.
+ * @function Dropbox#usersGetAccount
+ * @arg {UsersGetAccountArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.usersGetAccount = function (arg) {
+ return this.request('users/get_account', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Get information about multiple user accounts. At most 300 accounts may be
+ * queried per request.
+ * @function Dropbox#usersGetAccountBatch
+ * @arg {UsersGetAccountBatchArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.usersGetAccountBatch = function (arg) {
+ return this.request('users/get_account_batch', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Get information about the current user's account.
+ * @function Dropbox#usersGetCurrentAccount
+ * @arg {void} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.usersGetCurrentAccount = function (arg) {
+ return this.request('users/get_current_account', arg, 'user', 'api', 'rpc');
+};
+
+/**
+ * Get the space usage information for the current user's account.
+ * @function Dropbox#usersGetSpaceUsage
+ * @arg {void} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes.usersGetSpaceUsage = function (arg) {
+ return this.request('users/get_space_usage', arg, 'user', 'api', 'rpc');
+};
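+
+// Illustrative usage, not generated code: the routes above are mixed into the
+// Dropbox client constructed elsewhere in this bundle, so (assuming a valid
+// access token) a call looks like
+//   var dbx = new Dropbox({accessToken: token});
+//   dbx.usersGetCurrentAccount().then(function (account) {
+//     console.log(account.email, account.name.display_name);
+//   });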
+
+var RPC = 'rpc';
+var UPLOAD = 'upload';
+var DOWNLOAD = 'download';
+
+function getSafeUnicode(c) {
+ var unicode = ('000' + c.charCodeAt(0).toString(16)).slice(-4);
+ return '\\u' + unicode;
+}
+
+/* global WorkerGlobalScope */
+function isWindowOrWorker() {
+ return typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope || typeof module === 'undefined' || typeof window !== 'undefined';
+}
+
+function getBaseURL(host) {
+ return 'https://' + host + '.dropboxapi.com/2/';
+}
+
+// source https://www.dropboxforum.com/t5/API-support/HTTP-header-quot-Dropbox-API-Arg-quot-could-not-decode-input-as/m-p/173823/highlight/true#M6786
+function httpHeaderSafeJson(args) {
+ return JSON.stringify(args).replace(/[\u007f-\uffff]/g, getSafeUnicode);
+}
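+
+// Example: getSafeUnicode escapes a single character, and httpHeaderSafeJson
+// applies it to every code point from U+007F up so the header value stays
+// 7-bit safe, e.g.
+//   httpHeaderSafeJson({path: '/naïve.zip'})
+//   -> '{"path":"/na\u00efve.zip"}'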
+
+var classCallCheck = function (instance, Constructor) {
+ if (!(instance instanceof Constructor)) {
+ throw new TypeError("Cannot call a class as a function");
+ }
+};
+
+var createClass = function () {
+ function defineProperties(target, props) {
+ for (var i = 0; i < props.length; i++) {
+ var descriptor = props[i];
+ descriptor.enumerable = descriptor.enumerable || false;
+ descriptor.configurable = true;
+ if ("value" in descriptor) descriptor.writable = true;
+ Object.defineProperty(target, descriptor.key, descriptor);
+ }
+ }
+
+ return function (Constructor, protoProps, staticProps) {
+ if (protoProps) defineProperties(Constructor.prototype, protoProps);
+ if (staticProps) defineProperties(Constructor, staticProps);
+ return Constructor;
+ };
+}();
+
+
+
+
+
+
+
+
+
+var inherits = function (subClass, superClass) {
+ if (typeof superClass !== "function" && superClass !== null) {
+ throw new TypeError("Super expression must either be null or a function, not " + typeof superClass);
+ }
+
+ subClass.prototype = Object.create(superClass && superClass.prototype, {
+ constructor: {
+ value: subClass,
+ enumerable: false,
+ writable: true,
+ configurable: true
+ }
+ });
+ if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass;
+};
+
+
+
+
+
+
+
+
+
+
+
+var possibleConstructorReturn = function (self, call) {
+ if (!self) {
+ throw new ReferenceError("this hasn't been initialised - super() hasn't been called");
+ }
+
+ return call && (typeof call === "object" || typeof call === "function") ? call : self;
+};
+
+
+
+
+
+var slicedToArray = function () {
+ function sliceIterator(arr, i) {
+ var _arr = [];
+ var _n = true;
+ var _d = false;
+ var _e = undefined;
+
+ try {
+ for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) {
+ _arr.push(_s.value);
+
+ if (i && _arr.length === i) break;
+ }
+ } catch (err) {
+ _d = true;
+ _e = err;
+ } finally {
+ try {
+ if (!_n && _i["return"]) _i["return"]();
+ } finally {
+ if (_d) throw _e;
+ }
+ }
+
+ return _arr;
+ }
+
+ return function (arr, i) {
+ if (Array.isArray(arr)) {
+ return arr;
+ } else if (Symbol.iterator in Object(arr)) {
+ return sliceIterator(arr, i);
+ } else {
+ throw new TypeError("Invalid attempt to destructure non-iterable instance");
+ }
+ };
+}();
+
+function getDataFromConsumer(res) {
+ if (!res.ok) {
+ return res.text();
+ }
+
+ return isWindowOrWorker() ? res.blob() : res.buffer();
+}
+
+function responseHandler(res, data) {
+ if (!res.ok) {
+ // eslint-disable-next-line no-throw-literal
+ throw {
+ error: data,
+ response: res,
+ status: res.status
+ };
+ }
+
+ var result = JSON.parse(res.headers.get('dropbox-api-result'));
+
+ if (isWindowOrWorker()) {
+ result.fileBlob = data;
+ } else {
+ result.fileBinary = data;
+ }
+
+ return result;
+}
+
+function downloadRequest(path, args, auth, host, accessToken, options) {
+ if (auth !== 'user') {
+ throw new Error('Unexpected auth type: ' + auth);
+ }
+
+ var fetchOptions = {
+ method: 'POST',
+ headers: {
+ Authorization: 'Bearer ' + accessToken,
+ 'Dropbox-API-Arg': httpHeaderSafeJson(args)
+ }
+ };
+
+ if (options) {
+ if (options.selectUser) {
+ fetchOptions.headers['Dropbox-API-Select-User'] = options.selectUser;
+ }
+ if (options.selectAdmin) {
+ fetchOptions.headers['Dropbox-API-Select-Admin'] = options.selectAdmin;
+ }
+ }
+
+ return fetch(getBaseURL(host) + path, fetchOptions).then(function (res) {
+ return getDataFromConsumer(res).then(function (data) {
+ return [res, data];
+ });
+ }).then(function (_ref) {
+ var _ref2 = slicedToArray(_ref, 2),
+ res = _ref2[0],
+ data = _ref2[1];
+
+ return responseHandler(res, data);
+ });
+}
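+
+// Illustrative note: download routes resolve with the metadata parsed from the
+// 'dropbox-api-result' header plus the payload as `fileBlob` in browsers (or
+// `fileBinary` otherwise), so a sketch with an assumed `dbx` instance is
+//   dbx.sharingGetSharedLinkFile({url: sharedLink}).then(function (res) {
+//     return res.fileBlob;  // Blob; res.name / res.size come from the header
+//   });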
+
+function parseBodyToType$1(res) {
+ var clone = res.clone();
+ return new Promise(function (resolve) {
+ res.json().then(function (data) {
+ return resolve(data);
+ }).catch(function () {
+ return clone.text().then(function (data) {
+ return resolve(data);
+ });
+ });
+ }).then(function (data) {
+ return [res, data];
+ });
+}
+
+function uploadRequest(path, args, auth, host, accessToken, options) {
+ if (auth !== 'user') {
+ throw new Error('Unexpected auth type: ' + auth);
+ }
+
+ var contents = args.contents;
+
+ delete args.contents;
+
+ var fetchOptions = {
+ body: contents,
+ method: 'POST',
+ headers: {
+ Authorization: 'Bearer ' + accessToken,
+ 'Content-Type': 'application/octet-stream',
+ 'Dropbox-API-Arg': httpHeaderSafeJson(args)
+ }
+ };
+
+ if (options) {
+ if (options.selectUser) {
+ fetchOptions.headers['Dropbox-API-Select-User'] = options.selectUser;
+ }
+ if (options.selectAdmin) {
+ fetchOptions.headers['Dropbox-API-Select-Admin'] = options.selectAdmin;
+ }
+ }
+
+ return fetch(getBaseURL(host) + path, fetchOptions).then(function (res) {
+ return parseBodyToType$1(res);
+ }).then(function (_ref) {
+ var _ref2 = slicedToArray(_ref, 2),
+ res = _ref2[0],
+ data = _ref2[1];
+
+ // maintaining existing API for error codes not equal to 200 range
+ if (!res.ok) {
+ // eslint-disable-next-line no-throw-literal
+ throw {
+ error: data,
+ response: res,
+ status: res.status
+ };
+ }
+
+ return data;
+ });
+}
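+
+// Illustrative note: upload routes expect the file body in `args.contents`;
+// all remaining fields are serialized into the Dropbox-API-Arg header. A
+// sketch, assuming a `dbx` instance and a filesUpload route generated with the
+// others in this file:
+//   dbx.filesUpload({path: '/styles.zip', mode: 'overwrite', contents: blob});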
+
+function createCommonjsModule(fn, module) {
+ return module = { exports: {} }, fn(module, module.exports), module.exports;
+}
+
+var byteLength_1 = byteLength;
+var toByteArray_1 = toByteArray;
+var fromByteArray_1 = fromByteArray;
+
+var lookup = [];
+var revLookup = [];
+var Arr = typeof Uint8Array !== 'undefined' ? Uint8Array : Array;
+
+var code = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+for (var i = 0, len = code.length; i < len; ++i) {
+ lookup[i] = code[i];
+ revLookup[code.charCodeAt(i)] = i;
+}
+
+revLookup['-'.charCodeAt(0)] = 62;
+revLookup['_'.charCodeAt(0)] = 63;
+
+function placeHoldersCount (b64) {
+ var len = b64.length;
+ if (len % 4 > 0) {
+ throw new Error('Invalid string. Length must be a multiple of 4')
+ }
+
+ // the number of equal signs (place holders)
+  // if there are two placeholders, then the two characters before them
+ // represent one byte
+ // if there is only one, then the three characters before it represent 2 bytes
+ // this is just a cheap hack to not do indexOf twice
+ return b64[len - 2] === '=' ? 2 : b64[len - 1] === '=' ? 1 : 0
+}
+
+function byteLength (b64) {
+  // every 4 base64 chars encode 3 bytes; subtract the bytes implied by '=' placeholders
+ return (b64.length * 3 / 4) - placeHoldersCount(b64)
+}
+
+function toByteArray (b64) {
+ var i, l, tmp, placeHolders, arr;
+ var len = b64.length;
+ placeHolders = placeHoldersCount(b64);
+
+ arr = new Arr((len * 3 / 4) - placeHolders);
+
+ // if there are placeholders, only get up to the last complete 4 chars
+ l = placeHolders > 0 ? len - 4 : len;
+
+ var L = 0;
+
+ for (i = 0; i < l; i += 4) {
+ tmp = (revLookup[b64.charCodeAt(i)] << 18) | (revLookup[b64.charCodeAt(i + 1)] << 12) | (revLookup[b64.charCodeAt(i + 2)] << 6) | revLookup[b64.charCodeAt(i + 3)];
+ arr[L++] = (tmp >> 16) & 0xFF;
+ arr[L++] = (tmp >> 8) & 0xFF;
+ arr[L++] = tmp & 0xFF;
+ }
+
+ if (placeHolders === 2) {
+ tmp = (revLookup[b64.charCodeAt(i)] << 2) | (revLookup[b64.charCodeAt(i + 1)] >> 4);
+ arr[L++] = tmp & 0xFF;
+ } else if (placeHolders === 1) {
+ tmp = (revLookup[b64.charCodeAt(i)] << 10) | (revLookup[b64.charCodeAt(i + 1)] << 4) | (revLookup[b64.charCodeAt(i + 2)] >> 2);
+ arr[L++] = (tmp >> 8) & 0xFF;
+ arr[L++] = tmp & 0xFF;
+ }
+
+ return arr
+}
+
+function tripletToBase64 (num) {
+ return lookup[num >> 18 & 0x3F] + lookup[num >> 12 & 0x3F] + lookup[num >> 6 & 0x3F] + lookup[num & 0x3F]
+}
+
+function encodeChunk (uint8, start, end) {
+ var tmp;
+ var output = [];
+ for (var i = start; i < end; i += 3) {
+ tmp = (uint8[i] << 16) + (uint8[i + 1] << 8) + (uint8[i + 2]);
+ output.push(tripletToBase64(tmp));
+ }
+ return output.join('')
+}
+
+function fromByteArray (uint8) {
+ var tmp;
+ var len = uint8.length;
+ var extraBytes = len % 3; // if we have 1 byte left, pad 2 bytes
+ var output = '';
+ var parts = [];
+ var maxChunkLength = 16383; // must be multiple of 3
+
+ // go through the array every three bytes, we'll deal with trailing stuff later
+ for (var i = 0, len2 = len - extraBytes; i < len2; i += maxChunkLength) {
+ parts.push(encodeChunk(uint8, i, (i + maxChunkLength) > len2 ? len2 : (i + maxChunkLength)));
+ }
+
+ // pad the end with zeros, but make sure to not forget the extra bytes
+ if (extraBytes === 1) {
+ tmp = uint8[len - 1];
+ output += lookup[tmp >> 2];
+ output += lookup[(tmp << 4) & 0x3F];
+ output += '==';
+ } else if (extraBytes === 2) {
+ tmp = (uint8[len - 2] << 8) + (uint8[len - 1]);
+ output += lookup[tmp >> 10];
+ output += lookup[(tmp >> 4) & 0x3F];
+ output += lookup[(tmp << 2) & 0x3F];
+ output += '=';
+ }
+
+ parts.push(output);
+
+ return parts.join('')
+}
+
+var base64Js = {
+ byteLength: byteLength_1,
+ toByteArray: toByteArray_1,
+ fromByteArray: fromByteArray_1
+};
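+
+// Example: fromByteArray(new Uint8Array([77, 97, 110])) === 'TWFu' ("Man"),
+// and toByteArray('TWFu') yields the bytes [77, 97, 110] again.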
+
+var read = function (buffer, offset, isLE, mLen, nBytes) {
+ var e, m;
+ var eLen = nBytes * 8 - mLen - 1;
+ var eMax = (1 << eLen) - 1;
+ var eBias = eMax >> 1;
+ var nBits = -7;
+ var i = isLE ? (nBytes - 1) : 0;
+ var d = isLE ? -1 : 1;
+ var s = buffer[offset + i];
+
+ i += d;
+
+ e = s & ((1 << (-nBits)) - 1);
+ s >>= (-nBits);
+ nBits += eLen;
+ for (; nBits > 0; e = e * 256 + buffer[offset + i], i += d, nBits -= 8) {}
+
+ m = e & ((1 << (-nBits)) - 1);
+ e >>= (-nBits);
+ nBits += mLen;
+ for (; nBits > 0; m = m * 256 + buffer[offset + i], i += d, nBits -= 8) {}
+
+ if (e === 0) {
+ e = 1 - eBias;
+ } else if (e === eMax) {
+ return m ? NaN : ((s ? -1 : 1) * Infinity)
+ } else {
+ m = m + Math.pow(2, mLen);
+ e = e - eBias;
+ }
+ return (s ? -1 : 1) * m * Math.pow(2, e - mLen)
+};
+
+var write = function (buffer, value, offset, isLE, mLen, nBytes) {
+ var e, m, c;
+ var eLen = nBytes * 8 - mLen - 1;
+ var eMax = (1 << eLen) - 1;
+ var eBias = eMax >> 1;
+ var rt = (mLen === 23 ? Math.pow(2, -24) - Math.pow(2, -77) : 0);
+ var i = isLE ? 0 : (nBytes - 1);
+ var d = isLE ? 1 : -1;
+ var s = value < 0 || (value === 0 && 1 / value < 0) ? 1 : 0;
+
+ value = Math.abs(value);
+
+ if (isNaN(value) || value === Infinity) {
+ m = isNaN(value) ? 1 : 0;
+ e = eMax;
+ } else {
+ e = Math.floor(Math.log(value) / Math.LN2);
+ if (value * (c = Math.pow(2, -e)) < 1) {
+ e--;
+ c *= 2;
+ }
+ if (e + eBias >= 1) {
+ value += rt / c;
+ } else {
+ value += rt * Math.pow(2, 1 - eBias);
+ }
+ if (value * c >= 2) {
+ e++;
+ c /= 2;
+ }
+
+ if (e + eBias >= eMax) {
+ m = 0;
+ e = eMax;
+ } else if (e + eBias >= 1) {
+ m = (value * c - 1) * Math.pow(2, mLen);
+ e = e + eBias;
+ } else {
+ m = value * Math.pow(2, eBias - 1) * Math.pow(2, mLen);
+ e = 0;
+ }
+ }
+
+ for (; mLen >= 8; buffer[offset + i] = m & 0xff, i += d, m /= 256, mLen -= 8) {}
+
+ e = (e << mLen) | m;
+ eLen += mLen;
+ for (; eLen > 0; buffer[offset + i] = e & 0xff, i += d, e /= 256, eLen -= 8) {}
+
+ buffer[offset + i - d] |= s * 128;
+};
+
+var ieee754 = {
+ read: read,
+ write: write
+};
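+
+// Example: write(buf, 1.5, 0, true, 23, 4) stores 1.5 as a little-endian
+// 32-bit float (23 mantissa bits in 4 bytes); read(buf, 0, true, 23, 4)
+// returns 1.5.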
+
+var buffer = createCommonjsModule(function (module, exports) {
+/*!
+ * The buffer module from node.js, for the browser.
+ *
+ * @author Feross Aboukhadijeh <http://feross.org>
+ * @license MIT
+ */
+/* eslint-disable no-proto */
+
+'use strict';
+
+
+
+
+exports.Buffer = Buffer;
+exports.SlowBuffer = SlowBuffer;
+exports.INSPECT_MAX_BYTES = 50;
+
+var K_MAX_LENGTH = 0x7fffffff;
+exports.kMaxLength = K_MAX_LENGTH;
+
+/**
+ * If `Buffer.TYPED_ARRAY_SUPPORT`:
+ * === true Use Uint8Array implementation (fastest)
+ * === false Print warning and recommend using `buffer` v4.x which has an Object
+ * implementation (most compatible, even IE6)
+ *
+ * Browsers that support typed arrays are IE 10+, Firefox 4+, Chrome 7+, Safari 5.1+,
+ * Opera 11.6+, iOS 4.2+.
+ *
+ * We report that the browser does not support typed arrays if they are not subclassable
+ * using __proto__. Firefox 4-29 lacks support for adding new properties to `Uint8Array`
+ * (See: https://bugzilla.mozilla.org/show_bug.cgi?id=695438). IE 10 lacks support
+ * for __proto__ and has a buggy typed array implementation.
+ */
+Buffer.TYPED_ARRAY_SUPPORT = typedArraySupport();
+
+if (!Buffer.TYPED_ARRAY_SUPPORT && typeof console !== 'undefined' &&
+ typeof console.error === 'function') {
+ console.error(
+ 'This browser lacks typed array (Uint8Array) support which is required by ' +
+ '`buffer` v5.x. Use `buffer` v4.x if you require old browser support.'
+ );
+}
+
+function typedArraySupport () {
+  // Can typed array instances be augmented?
+ try {
+ var arr = new Uint8Array(1);
+ arr.__proto__ = {__proto__: Uint8Array.prototype, foo: function () { return 42 }};
+ return arr.foo() === 42
+ } catch (e) {
+ return false
+ }
+}
+
+function createBuffer (length) {
+ if (length > K_MAX_LENGTH) {
+ throw new RangeError('Invalid typed array length')
+ }
+ // Return an augmented `Uint8Array` instance
+ var buf = new Uint8Array(length);
+ buf.__proto__ = Buffer.prototype;
+ return buf
+}
+
+/**
+ * The Buffer constructor returns instances of `Uint8Array` that have their
+ * prototype changed to `Buffer.prototype`. Furthermore, `Buffer` is a subclass of
+ * `Uint8Array`, so the returned instances will have all the node `Buffer` methods
+ * and the `Uint8Array` methods. Square bracket notation works as expected -- it
+ * returns a single octet.
+ *
+ * The `Uint8Array` prototype remains unmodified.
+ */
+
+function Buffer (arg, encodingOrOffset, length) {
+ // Common case.
+ if (typeof arg === 'number') {
+ if (typeof encodingOrOffset === 'string') {
+ throw new Error(
+ 'If encoding is specified then the first argument must be a string'
+ )
+ }
+ return allocUnsafe(arg)
+ }
+ return from(arg, encodingOrOffset, length)
+}
+
+// Fix subarray() in ES2016. See: https://github.com/feross/buffer/pull/97
+if (typeof Symbol !== 'undefined' && Symbol.species &&
+ Buffer[Symbol.species] === Buffer) {
+ Object.defineProperty(Buffer, Symbol.species, {
+ value: null,
+ configurable: true,
+ enumerable: false,
+ writable: false
+ });
+}
+
+Buffer.poolSize = 8192; // not used by this implementation
+
+function from (value, encodingOrOffset, length) {
+ if (typeof value === 'number') {
+ throw new TypeError('"value" argument must not be a number')
+ }
+
+ if (isArrayBuffer(value)) {
+ return fromArrayBuffer(value, encodingOrOffset, length)
+ }
+
+ if (typeof value === 'string') {
+ return fromString(value, encodingOrOffset)
+ }
+
+ return fromObject(value)
+}
+
+/**
+ * Functionally equivalent to Buffer(arg, encoding) but throws a TypeError
+ * if value is a number.
+ * Buffer.from(str[, encoding])
+ * Buffer.from(array)
+ * Buffer.from(buffer)
+ * Buffer.from(arrayBuffer[, byteOffset[, length]])
+ **/
+Buffer.from = function (value, encodingOrOffset, length) {
+ return from(value, encodingOrOffset, length)
+};
+
+// Note: Change prototype *after* Buffer.from is defined to workaround Chrome bug:
+// https://github.com/feross/buffer/pull/148
+Buffer.prototype.__proto__ = Uint8Array.prototype;
+Buffer.__proto__ = Uint8Array;
+
+function assertSize (size) {
+ if (typeof size !== 'number') {
+ throw new TypeError('"size" argument must be a number')
+ } else if (size < 0) {
+ throw new RangeError('"size" argument must not be negative')
+ }
+}
+
+function alloc (size, fill, encoding) {
+ assertSize(size);
+ if (size <= 0) {
+ return createBuffer(size)
+ }
+ if (fill !== undefined) {
+ // Only pay attention to encoding if it's a string. This
+ // prevents accidentally sending in a number that would
+    // be interpreted as a start offset.
+ return typeof encoding === 'string'
+ ? createBuffer(size).fill(fill, encoding)
+ : createBuffer(size).fill(fill)
+ }
+ return createBuffer(size)
+}
+
+/**
+ * Creates a new filled Buffer instance.
+ * alloc(size[, fill[, encoding]])
+ **/
+Buffer.alloc = function (size, fill, encoding) {
+ return alloc(size, fill, encoding)
+};
+
+function allocUnsafe (size) {
+ assertSize(size);
+ return createBuffer(size < 0 ? 0 : checked(size) | 0)
+}
+
+/**
+ * Equivalent to Buffer(num), by default creates a non-zero-filled Buffer instance.
+ * */
+Buffer.allocUnsafe = function (size) {
+ return allocUnsafe(size)
+};
+/**
+ * Equivalent to SlowBuffer(num), by default creates a non-zero-filled Buffer instance.
+ */
+Buffer.allocUnsafeSlow = function (size) {
+ return allocUnsafe(size)
+};
+
+function fromString (string, encoding) {
+ if (typeof encoding !== 'string' || encoding === '') {
+ encoding = 'utf8';
+ }
+
+ if (!Buffer.isEncoding(encoding)) {
+ throw new TypeError('"encoding" must be a valid string encoding')
+ }
+
+ var length = byteLength(string, encoding) | 0;
+ var buf = createBuffer(length);
+
+ var actual = buf.write(string, encoding);
+
+ if (actual !== length) {
+ // Writing a hex string, for example, that contains invalid characters will
+ // cause everything after the first invalid character to be ignored. (e.g.
+ // 'abxxcd' will be treated as 'ab')
+ buf = buf.slice(0, actual);
+ }
+
+ return buf
+}
+
+function fromArrayLike (array) {
+ var length = array.length < 0 ? 0 : checked(array.length) | 0;
+ var buf = createBuffer(length);
+ for (var i = 0; i < length; i += 1) {
+ buf[i] = array[i] & 255;
+ }
+ return buf
+}
+
+function fromArrayBuffer (array, byteOffset, length) {
+ if (byteOffset < 0 || array.byteLength < byteOffset) {
+ throw new RangeError('\'offset\' is out of bounds')
+ }
+
+ if (array.byteLength < byteOffset + (length || 0)) {
+ throw new RangeError('\'length\' is out of bounds')
+ }
+
+ var buf;
+ if (byteOffset === undefined && length === undefined) {
+ buf = new Uint8Array(array);
+ } else if (length === undefined) {
+ buf = new Uint8Array(array, byteOffset);
+ } else {
+ buf = new Uint8Array(array, byteOffset, length);
+ }
+
+ // Return an augmented `Uint8Array` instance
+ buf.__proto__ = Buffer.prototype;
+ return buf
+}
+
+function fromObject (obj) {
+ if (Buffer.isBuffer(obj)) {
+ var len = checked(obj.length) | 0;
+ var buf = createBuffer(len);
+
+ if (buf.length === 0) {
+ return buf
+ }
+
+ obj.copy(buf, 0, 0, len);
+ return buf
+ }
+
+ if (obj) {
+ if (isArrayBufferView(obj) || 'length' in obj) {
+ if (typeof obj.length !== 'number' || numberIsNaN(obj.length)) {
+ return createBuffer(0)
+ }
+ return fromArrayLike(obj)
+ }
+
+ if (obj.type === 'Buffer' && Array.isArray(obj.data)) {
+ return fromArrayLike(obj.data)
+ }
+ }
+
+ throw new TypeError('First argument must be a string, Buffer, ArrayBuffer, Array, or array-like object.')
+}
+
+function checked (length) {
+ // Note: cannot use `length < K_MAX_LENGTH` here because that fails when
+ // length is NaN (which is otherwise coerced to zero.)
+ if (length >= K_MAX_LENGTH) {
+ throw new RangeError('Attempt to allocate Buffer larger than maximum ' +
+ 'size: 0x' + K_MAX_LENGTH.toString(16) + ' bytes')
+ }
+ return length | 0
+}
+
+function SlowBuffer (length) {
+ if (+length != length) { // eslint-disable-line eqeqeq
+ length = 0;
+ }
+ return Buffer.alloc(+length)
+}
+
+Buffer.isBuffer = function isBuffer (b) {
+ return b != null && b._isBuffer === true
+};
+
+Buffer.compare = function compare (a, b) {
+ if (!Buffer.isBuffer(a) || !Buffer.isBuffer(b)) {
+ throw new TypeError('Arguments must be Buffers')
+ }
+
+ if (a === b) return 0
+
+ var x = a.length;
+ var y = b.length;
+
+ for (var i = 0, len = Math.min(x, y); i < len; ++i) {
+ if (a[i] !== b[i]) {
+ x = a[i];
+ y = b[i];
+ break
+ }
+ }
+
+ if (x < y) return -1
+ if (y < x) return 1
+ return 0
+};
+
+Buffer.isEncoding = function isEncoding (encoding) {
+ switch (String(encoding).toLowerCase()) {
+ case 'hex':
+ case 'utf8':
+ case 'utf-8':
+ case 'ascii':
+ case 'latin1':
+ case 'binary':
+ case 'base64':
+ case 'ucs2':
+ case 'ucs-2':
+ case 'utf16le':
+ case 'utf-16le':
+ return true
+ default:
+ return false
+ }
+};
+
+Buffer.concat = function concat (list, length) {
+ if (!Array.isArray(list)) {
+ throw new TypeError('"list" argument must be an Array of Buffers')
+ }
+
+ if (list.length === 0) {
+ return Buffer.alloc(0)
+ }
+
+ var i;
+ if (length === undefined) {
+ length = 0;
+ for (i = 0; i < list.length; ++i) {
+ length += list[i].length;
+ }
+ }
+
+ var buffer = Buffer.allocUnsafe(length);
+ var pos = 0;
+ for (i = 0; i < list.length; ++i) {
+ var buf = list[i];
+ if (!Buffer.isBuffer(buf)) {
+ throw new TypeError('"list" argument must be an Array of Buffers')
+ }
+ buf.copy(buffer, pos);
+ pos += buf.length;
+ }
+ return buffer
+};
+
+function byteLength (string, encoding) {
+ if (Buffer.isBuffer(string)) {
+ return string.length
+ }
+ if (isArrayBufferView(string) || isArrayBuffer(string)) {
+ return string.byteLength
+ }
+ if (typeof string !== 'string') {
+ string = '' + string;
+ }
+
+ var len = string.length;
+ if (len === 0) return 0
+
+ // Use a for loop to avoid recursion
+ var loweredCase = false;
+ for (;;) {
+ switch (encoding) {
+ case 'ascii':
+ case 'latin1':
+ case 'binary':
+ return len
+ case 'utf8':
+ case 'utf-8':
+ case undefined:
+ return utf8ToBytes(string).length
+ case 'ucs2':
+ case 'ucs-2':
+ case 'utf16le':
+ case 'utf-16le':
+ return len * 2
+ case 'hex':
+ return len >>> 1
+ case 'base64':
+ return base64ToBytes(string).length
+ default:
+ if (loweredCase) return utf8ToBytes(string).length // assume utf8
+ encoding = ('' + encoding).toLowerCase();
+ loweredCase = true;
+ }
+ }
+}
+Buffer.byteLength = byteLength;
+
+function slowToString (encoding, start, end) {
+ var loweredCase = false;
+
+ // No need to verify that "this.length <= MAX_UINT32" since it's a read-only
+ // property of a typed array.
+
+ // This behaves neither like String nor Uint8Array in that we set start/end
+ // to their upper/lower bounds if the value passed is out of range.
+ // undefined is handled specially as per ECMA-262 6th Edition,
+ // Section 13.3.3.7 Runtime Semantics: KeyedBindingInitialization.
+ if (start === undefined || start < 0) {
+ start = 0;
+ }
+ // Return early if start > this.length. Done here to prevent potential uint32
+ // coercion fail below.
+ if (start > this.length) {
+ return ''
+ }
+
+ if (end === undefined || end > this.length) {
+ end = this.length;
+ }
+
+ if (end <= 0) {
+ return ''
+ }
+
+  // Force coercion to uint32. This will also coerce falsy/NaN values to 0.
+ end >>>= 0;
+ start >>>= 0;
+
+ if (end <= start) {
+ return ''
+ }
+
+ if (!encoding) encoding = 'utf8';
+
+ while (true) {
+ switch (encoding) {
+ case 'hex':
+ return hexSlice(this, start, end)
+
+ case 'utf8':
+ case 'utf-8':
+ return utf8Slice(this, start, end)
+
+ case 'ascii':
+ return asciiSlice(this, start, end)
+
+ case 'latin1':
+ case 'binary':
+ return latin1Slice(this, start, end)
+
+ case 'base64':
+ return base64Slice(this, start, end)
+
+ case 'ucs2':
+ case 'ucs-2':
+ case 'utf16le':
+ case 'utf-16le':
+ return utf16leSlice(this, start, end)
+
+ default:
+ if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding)
+ encoding = (encoding + '').toLowerCase();
+ loweredCase = true;
+ }
+ }
+}
+
+// This property is used by `Buffer.isBuffer` (and the `is-buffer` npm package)
+// to detect a Buffer instance. It's not possible to use `instanceof Buffer`
+// reliably in a browserify context because there could be multiple different
+// copies of the 'buffer' package in use. This method works even for Buffer
+// instances that were created from another copy of the `buffer` package.
+// See: https://github.com/feross/buffer/issues/154
+Buffer.prototype._isBuffer = true;
+
+function swap (b, n, m) {
+ var i = b[n];
+ b[n] = b[m];
+ b[m] = i;
+}
+
+Buffer.prototype.swap16 = function swap16 () {
+ var len = this.length;
+ if (len % 2 !== 0) {
+ throw new RangeError('Buffer size must be a multiple of 16-bits')
+ }
+ for (var i = 0; i < len; i += 2) {
+ swap(this, i, i + 1);
+ }
+ return this
+};
+
+Buffer.prototype.swap32 = function swap32 () {
+ var len = this.length;
+ if (len % 4 !== 0) {
+ throw new RangeError('Buffer size must be a multiple of 32-bits')
+ }
+ for (var i = 0; i < len; i += 4) {
+ swap(this, i, i + 3);
+ swap(this, i + 1, i + 2);
+ }
+ return this
+};
+
+Buffer.prototype.swap64 = function swap64 () {
+ var len = this.length;
+ if (len % 8 !== 0) {
+ throw new RangeError('Buffer size must be a multiple of 64-bits')
+ }
+ for (var i = 0; i < len; i += 8) {
+ swap(this, i, i + 7);
+ swap(this, i + 1, i + 6);
+ swap(this, i + 2, i + 5);
+ swap(this, i + 3, i + 4);
+ }
+ return this
+};
+
+Buffer.prototype.toString = function toString () {
+ var length = this.length;
+ if (length === 0) return ''
+ if (arguments.length === 0) return utf8Slice(this, 0, length)
+ return slowToString.apply(this, arguments)
+};
+
+Buffer.prototype.equals = function equals (b) {
+ if (!Buffer.isBuffer(b)) throw new TypeError('Argument must be a Buffer')
+ if (this === b) return true
+ return Buffer.compare(this, b) === 0
+};
+
+Buffer.prototype.inspect = function inspect () {
+ var str = '';
+ var max = exports.INSPECT_MAX_BYTES;
+ if (this.length > 0) {
+ str = this.toString('hex', 0, max).match(/.{2}/g).join(' ');
+ if (this.length > max) str += ' ... ';
+ }
+  return '<Buffer ' + str + '>'
+};
+
+Buffer.prototype.compare = function compare (target, start, end, thisStart, thisEnd) {
+ if (!Buffer.isBuffer(target)) {
+ throw new TypeError('Argument must be a Buffer')
+ }
+
+ if (start === undefined) {
+ start = 0;
+ }
+ if (end === undefined) {
+ end = target ? target.length : 0;
+ }
+ if (thisStart === undefined) {
+ thisStart = 0;
+ }
+ if (thisEnd === undefined) {
+ thisEnd = this.length;
+ }
+
+ if (start < 0 || end > target.length || thisStart < 0 || thisEnd > this.length) {
+ throw new RangeError('out of range index')
+ }
+
+ if (thisStart >= thisEnd && start >= end) {
+ return 0
+ }
+ if (thisStart >= thisEnd) {
+ return -1
+ }
+ if (start >= end) {
+ return 1
+ }
+
+ start >>>= 0;
+ end >>>= 0;
+ thisStart >>>= 0;
+ thisEnd >>>= 0;
+
+ if (this === target) return 0
+
+ var x = thisEnd - thisStart;
+ var y = end - start;
+ var len = Math.min(x, y);
+
+ var thisCopy = this.slice(thisStart, thisEnd);
+ var targetCopy = target.slice(start, end);
+
+ for (var i = 0; i < len; ++i) {
+ if (thisCopy[i] !== targetCopy[i]) {
+ x = thisCopy[i];
+ y = targetCopy[i];
+ break
+ }
+ }
+
+ if (x < y) return -1
+ if (y < x) return 1
+ return 0
+};
+
+// Finds either the first index of `val` in `buffer` at offset >= `byteOffset`,
+// OR the last index of `val` in `buffer` at offset <= `byteOffset`.
+//
+// Arguments:
+// - buffer - a Buffer to search
+// - val - a string, Buffer, or number
+// - byteOffset - an index into `buffer`; will be clamped to an int32
+// - encoding - an optional encoding, relevant if val is a string
+// - dir - true for indexOf, false for lastIndexOf
+function bidirectionalIndexOf (buffer, val, byteOffset, encoding, dir) {
+ // Empty buffer means no match
+ if (buffer.length === 0) return -1
+
+ // Normalize byteOffset
+ if (typeof byteOffset === 'string') {
+ encoding = byteOffset;
+ byteOffset = 0;
+ } else if (byteOffset > 0x7fffffff) {
+ byteOffset = 0x7fffffff;
+ } else if (byteOffset < -0x80000000) {
+ byteOffset = -0x80000000;
+ }
+ byteOffset = +byteOffset; // Coerce to Number.
+ if (numberIsNaN(byteOffset)) {
+    // byteOffset: if it's undefined, null, NaN, "foo", etc, search whole buffer
+ byteOffset = dir ? 0 : (buffer.length - 1);
+ }
+
+ // Normalize byteOffset: negative offsets start from the end of the buffer
+ if (byteOffset < 0) byteOffset = buffer.length + byteOffset;
+ if (byteOffset >= buffer.length) {
+ if (dir) return -1
+ else byteOffset = buffer.length - 1;
+ } else if (byteOffset < 0) {
+ if (dir) byteOffset = 0;
+ else return -1
+ }
+
+ // Normalize val
+ if (typeof val === 'string') {
+ val = Buffer.from(val, encoding);
+ }
+
+ // Finally, search either indexOf (if dir is true) or lastIndexOf
+ if (Buffer.isBuffer(val)) {
+ // Special case: looking for empty string/buffer always fails
+ if (val.length === 0) {
+ return -1
+ }
+ return arrayIndexOf(buffer, val, byteOffset, encoding, dir)
+ } else if (typeof val === 'number') {
+ val = val & 0xFF; // Search for a byte value [0-255]
+ if (typeof Uint8Array.prototype.indexOf === 'function') {
+ if (dir) {
+ return Uint8Array.prototype.indexOf.call(buffer, val, byteOffset)
+ } else {
+ return Uint8Array.prototype.lastIndexOf.call(buffer, val, byteOffset)
+ }
+ }
+ return arrayIndexOf(buffer, [ val ], byteOffset, encoding, dir)
+ }
+
+ throw new TypeError('val must be string, number or Buffer')
+}
+
+function arrayIndexOf (arr, val, byteOffset, encoding, dir) {
+ var indexSize = 1;
+ var arrLength = arr.length;
+ var valLength = val.length;
+
+ if (encoding !== undefined) {
+ encoding = String(encoding).toLowerCase();
+ if (encoding === 'ucs2' || encoding === 'ucs-2' ||
+ encoding === 'utf16le' || encoding === 'utf-16le') {
+ if (arr.length < 2 || val.length < 2) {
+ return -1
+ }
+ indexSize = 2;
+ arrLength /= 2;
+ valLength /= 2;
+ byteOffset /= 2;
+ }
+ }
+
+ function read (buf, i) {
+ if (indexSize === 1) {
+ return buf[i]
+ } else {
+ return buf.readUInt16BE(i * indexSize)
+ }
+ }
+
+ var i;
+ if (dir) {
+ var foundIndex = -1;
+ for (i = byteOffset; i < arrLength; i++) {
+ if (read(arr, i) === read(val, foundIndex === -1 ? 0 : i - foundIndex)) {
+ if (foundIndex === -1) foundIndex = i;
+ if (i - foundIndex + 1 === valLength) return foundIndex * indexSize
+ } else {
+ if (foundIndex !== -1) i -= i - foundIndex;
+ foundIndex = -1;
+ }
+ }
+ } else {
+ if (byteOffset + valLength > arrLength) byteOffset = arrLength - valLength;
+ for (i = byteOffset; i >= 0; i--) {
+ var found = true;
+ for (var j = 0; j < valLength; j++) {
+ if (read(arr, i + j) !== read(val, j)) {
+ found = false;
+ break
+ }
+ }
+ if (found) return i
+ }
+ }
+
+ return -1
+}
+
+Buffer.prototype.includes = function includes (val, byteOffset, encoding) {
+ return this.indexOf(val, byteOffset, encoding) !== -1
+};
+
+Buffer.prototype.indexOf = function indexOf (val, byteOffset, encoding) {
+ return bidirectionalIndexOf(this, val, byteOffset, encoding, true)
+};
+
+Buffer.prototype.lastIndexOf = function lastIndexOf (val, byteOffset, encoding) {
+ return bidirectionalIndexOf(this, val, byteOffset, encoding, false)
+};
+
+function hexWrite (buf, string, offset, length) {
+ offset = Number(offset) || 0;
+ var remaining = buf.length - offset;
+ if (!length) {
+ length = remaining;
+ } else {
+ length = Number(length);
+ if (length > remaining) {
+ length = remaining;
+ }
+ }
+
+ // must be an even number of digits
+ var strLen = string.length;
+ if (strLen % 2 !== 0) throw new TypeError('Invalid hex string')
+
+ if (length > strLen / 2) {
+ length = strLen / 2;
+ }
+ for (var i = 0; i < length; ++i) {
+ var parsed = parseInt(string.substr(i * 2, 2), 16);
+ if (numberIsNaN(parsed)) return i
+ buf[offset + i] = parsed;
+ }
+ return i
+}
+
+function utf8Write (buf, string, offset, length) {
+ return blitBuffer(utf8ToBytes(string, buf.length - offset), buf, offset, length)
+}
+
+function asciiWrite (buf, string, offset, length) {
+ return blitBuffer(asciiToBytes(string), buf, offset, length)
+}
+
+function latin1Write (buf, string, offset, length) {
+ return asciiWrite(buf, string, offset, length)
+}
+
+function base64Write (buf, string, offset, length) {
+ return blitBuffer(base64ToBytes(string), buf, offset, length)
+}
+
+function ucs2Write (buf, string, offset, length) {
+ return blitBuffer(utf16leToBytes(string, buf.length - offset), buf, offset, length)
+}
+
+Buffer.prototype.write = function write (string, offset, length, encoding) {
+ // Buffer#write(string)
+ if (offset === undefined) {
+ encoding = 'utf8';
+ length = this.length;
+ offset = 0;
+ // Buffer#write(string, encoding)
+ } else if (length === undefined && typeof offset === 'string') {
+ encoding = offset;
+ length = this.length;
+ offset = 0;
+ // Buffer#write(string, offset[, length][, encoding])
+ } else if (isFinite(offset)) {
+ offset = offset >>> 0;
+ if (isFinite(length)) {
+ length = length >>> 0;
+ if (encoding === undefined) encoding = 'utf8';
+ } else {
+ encoding = length;
+ length = undefined;
+ }
+ } else {
+ throw new Error(
+ 'Buffer.write(string, encoding, offset[, length]) is no longer supported'
+ )
+ }
+
+ var remaining = this.length - offset;
+ if (length === undefined || length > remaining) length = remaining;
+
+ if ((string.length > 0 && (length < 0 || offset < 0)) || offset > this.length) {
+ throw new RangeError('Attempt to write outside buffer bounds')
+ }
+
+ if (!encoding) encoding = 'utf8';
+
+ var loweredCase = false;
+ for (;;) {
+ switch (encoding) {
+ case 'hex':
+ return hexWrite(this, string, offset, length)
+
+ case 'utf8':
+ case 'utf-8':
+ return utf8Write(this, string, offset, length)
+
+ case 'ascii':
+ return asciiWrite(this, string, offset, length)
+
+ case 'latin1':
+ case 'binary':
+ return latin1Write(this, string, offset, length)
+
+ case 'base64':
+ // Warning: maxLength not taken into account in base64Write
+ return base64Write(this, string, offset, length)
+
+ case 'ucs2':
+ case 'ucs-2':
+ case 'utf16le':
+ case 'utf-16le':
+ return ucs2Write(this, string, offset, length)
+
+ default:
+ if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding)
+ encoding = ('' + encoding).toLowerCase();
+ loweredCase = true;
+ }
+ }
+};
+
+Buffer.prototype.toJSON = function toJSON () {
+ return {
+ type: 'Buffer',
+ data: Array.prototype.slice.call(this._arr || this, 0)
+ }
+};
+
+function base64Slice (buf, start, end) {
+ if (start === 0 && end === buf.length) {
+ return base64Js.fromByteArray(buf)
+ } else {
+ return base64Js.fromByteArray(buf.slice(start, end))
+ }
+}
+
+function utf8Slice (buf, start, end) {
+ end = Math.min(buf.length, end);
+ var res = [];
+
+ var i = start;
+ while (i < end) {
+ var firstByte = buf[i];
+ var codePoint = null;
+ var bytesPerSequence = (firstByte > 0xEF) ? 4
+ : (firstByte > 0xDF) ? 3
+ : (firstByte > 0xBF) ? 2
+ : 1;
+
+ if (i + bytesPerSequence <= end) {
+ var secondByte, thirdByte, fourthByte, tempCodePoint;
+
+ switch (bytesPerSequence) {
+ case 1:
+ if (firstByte < 0x80) {
+ codePoint = firstByte;
+ }
+ break
+ case 2:
+ secondByte = buf[i + 1];
+ if ((secondByte & 0xC0) === 0x80) {
+ tempCodePoint = (firstByte & 0x1F) << 0x6 | (secondByte & 0x3F);
+ if (tempCodePoint > 0x7F) {
+ codePoint = tempCodePoint;
+ }
+ }
+ break
+ case 3:
+ secondByte = buf[i + 1];
+ thirdByte = buf[i + 2];
+ if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80) {
+ tempCodePoint = (firstByte & 0xF) << 0xC | (secondByte & 0x3F) << 0x6 | (thirdByte & 0x3F);
+ if (tempCodePoint > 0x7FF && (tempCodePoint < 0xD800 || tempCodePoint > 0xDFFF)) {
+ codePoint = tempCodePoint;
+ }
+ }
+ break
+ case 4:
+ secondByte = buf[i + 1];
+ thirdByte = buf[i + 2];
+ fourthByte = buf[i + 3];
+ if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80 && (fourthByte & 0xC0) === 0x80) {
+ tempCodePoint = (firstByte & 0xF) << 0x12 | (secondByte & 0x3F) << 0xC | (thirdByte & 0x3F) << 0x6 | (fourthByte & 0x3F);
+ if (tempCodePoint > 0xFFFF && tempCodePoint < 0x110000) {
+ codePoint = tempCodePoint;
+ }
+ }
+ }
+ }
+
+ if (codePoint === null) {
+ // we did not generate a valid codePoint so insert a
+ // replacement char (U+FFFD) and advance only 1 byte
+ codePoint = 0xFFFD;
+ bytesPerSequence = 1;
+ } else if (codePoint > 0xFFFF) {
+ // encode to utf16 (surrogate pair dance)
+ codePoint -= 0x10000;
+ res.push(codePoint >>> 10 & 0x3FF | 0xD800);
+ codePoint = 0xDC00 | codePoint & 0x3FF;
+ }
+
+ res.push(codePoint);
+ i += bytesPerSequence;
+ }
+
+ return decodeCodePointsArray(res)
+}
+
+// Based on http://stackoverflow.com/a/22747272/680742, the browser with
+// the lowest limit is Chrome, with 0x10000 args.
+// We go 1 magnitude less, for safety
+var MAX_ARGUMENTS_LENGTH = 0x1000;
+
+function decodeCodePointsArray (codePoints) {
+ var len = codePoints.length;
+ if (len <= MAX_ARGUMENTS_LENGTH) {
+ return String.fromCharCode.apply(String, codePoints) // avoid extra slice()
+ }
+
+ // Decode in chunks to avoid "call stack size exceeded".
+ var res = '';
+ var i = 0;
+ while (i < len) {
+ res += String.fromCharCode.apply(
+ String,
+ codePoints.slice(i, i += MAX_ARGUMENTS_LENGTH)
+ );
+ }
+ return res
+}
+
+function asciiSlice (buf, start, end) {
+ var ret = '';
+ end = Math.min(buf.length, end);
+
+ for (var i = start; i < end; ++i) {
+ ret += String.fromCharCode(buf[i] & 0x7F);
+ }
+ return ret
+}
+
+function latin1Slice (buf, start, end) {
+ var ret = '';
+ end = Math.min(buf.length, end);
+
+ for (var i = start; i < end; ++i) {
+ ret += String.fromCharCode(buf[i]);
+ }
+ return ret
+}
+
+function hexSlice (buf, start, end) {
+ var len = buf.length;
+
+ if (!start || start < 0) start = 0;
+ if (!end || end < 0 || end > len) end = len;
+
+ var out = '';
+ for (var i = start; i < end; ++i) {
+ out += toHex(buf[i]);
+ }
+ return out
+}
+
+function utf16leSlice (buf, start, end) {
+ var bytes = buf.slice(start, end);
+ var res = '';
+ for (var i = 0; i < bytes.length; i += 2) {
+ res += String.fromCharCode(bytes[i] + (bytes[i + 1] * 256));
+ }
+ return res
+}
+
+Buffer.prototype.slice = function slice (start, end) {
+ var len = this.length;
+ start = ~~start;
+ end = end === undefined ? len : ~~end;
+
+ if (start < 0) {
+ start += len;
+ if (start < 0) start = 0;
+ } else if (start > len) {
+ start = len;
+ }
+
+ if (end < 0) {
+ end += len;
+ if (end < 0) end = 0;
+ } else if (end > len) {
+ end = len;
+ }
+
+ if (end < start) end = start;
+
+ var newBuf = this.subarray(start, end);
+ // Return an augmented `Uint8Array` instance
+ newBuf.__proto__ = Buffer.prototype;
+ return newBuf
+};
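+
+// Sketch (illustrative): slice() returns a view over the same underlying
+// memory (via subarray), so writes through the slice show up in the parent:
+//   var whole = Buffer.from([1, 2, 3, 4]);
+//   var part = whole.slice(1, 3); // <Buffer 02 03>
+//   part[0] = 0xff;               // whole is now <Buffer 01 ff 03 04>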
+
+/*
+ * Need to make sure that buffer isn't trying to write out of bounds.
+ */
+function checkOffset (offset, ext, length) {
+ if ((offset % 1) !== 0 || offset < 0) throw new RangeError('offset is not uint')
+ if (offset + ext > length) throw new RangeError('Trying to access beyond buffer length')
+}
+
+Buffer.prototype.readUIntLE = function readUIntLE (offset, byteLength, noAssert) {
+ offset = offset >>> 0;
+ byteLength = byteLength >>> 0;
+ if (!noAssert) checkOffset(offset, byteLength, this.length);
+
+ var val = this[offset];
+ var mul = 1;
+ var i = 0;
+ while (++i < byteLength && (mul *= 0x100)) {
+ val += this[offset + i] * mul;
+ }
+
+ return val
+};
+
+Buffer.prototype.readUIntBE = function readUIntBE (offset, byteLength, noAssert) {
+ offset = offset >>> 0;
+ byteLength = byteLength >>> 0;
+ if (!noAssert) {
+ checkOffset(offset, byteLength, this.length);
+ }
+
+ var val = this[offset + --byteLength];
+ var mul = 1;
+ while (byteLength > 0 && (mul *= 0x100)) {
+ val += this[offset + --byteLength] * mul;
+ }
+
+ return val
+};
+
+Buffer.prototype.readUInt8 = function readUInt8 (offset, noAssert) {
+ offset = offset >>> 0;
+ if (!noAssert) checkOffset(offset, 1, this.length);
+ return this[offset]
+};
+
+Buffer.prototype.readUInt16LE = function readUInt16LE (offset, noAssert) {
+ offset = offset >>> 0;
+ if (!noAssert) checkOffset(offset, 2, this.length);
+ return this[offset] | (this[offset + 1] << 8)
+};
+
+Buffer.prototype.readUInt16BE = function readUInt16BE (offset, noAssert) {
+ offset = offset >>> 0;
+ if (!noAssert) checkOffset(offset, 2, this.length);
+ return (this[offset] << 8) | this[offset + 1]
+};
+
+Buffer.prototype.readUInt32LE = function readUInt32LE (offset, noAssert) {
+ offset = offset >>> 0;
+ if (!noAssert) checkOffset(offset, 4, this.length);
+
+ return ((this[offset]) |
+ (this[offset + 1] << 8) |
+ (this[offset + 2] << 16)) +
+ (this[offset + 3] * 0x1000000)
+};
+
+Buffer.prototype.readUInt32BE = function readUInt32BE (offset, noAssert) {
+ offset = offset >>> 0;
+ if (!noAssert) checkOffset(offset, 4, this.length);
+
+ return (this[offset] * 0x1000000) +
+ ((this[offset + 1] << 16) |
+ (this[offset + 2] << 8) |
+ this[offset + 3])
+};
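+
+// Sketch (illustrative): the fixed-width readers decode raw bytes at a given
+// offset; the LE/BE variants differ only in byte order:
+//   var b = Buffer.from([0x12, 0x34, 0x56, 0x78]);
+//   b.readUInt16BE(0); // 0x1234
+//   b.readUInt16LE(0); // 0x3412
+//   b.readUInt32BE(0); // 0x12345678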
+
+Buffer.prototype.readIntLE = function readIntLE (offset, byteLength, noAssert) {
+ offset = offset >>> 0;
+ byteLength = byteLength >>> 0;
+ if (!noAssert) checkOffset(offset, byteLength, this.length);
+
+ var val = this[offset];
+ var mul = 1;
+ var i = 0;
+ while (++i < byteLength && (mul *= 0x100)) {
+ val += this[offset + i] * mul;
+ }
+ mul *= 0x80;
+
+ if (val >= mul) val -= Math.pow(2, 8 * byteLength);
+
+ return val
+};
+
+Buffer.prototype.readIntBE = function readIntBE (offset, byteLength, noAssert) {
+ offset = offset >>> 0;
+ byteLength = byteLength >>> 0;
+ if (!noAssert) checkOffset(offset, byteLength, this.length);
+
+ var i = byteLength;
+ var mul = 1;
+ var val = this[offset + --i];
+ while (i > 0 && (mul *= 0x100)) {
+ val += this[offset + --i] * mul;
+ }
+ mul *= 0x80;
+
+ if (val >= mul) val -= Math.pow(2, 8 * byteLength);
+
+ return val
+};
+
+Buffer.prototype.readInt8 = function readInt8 (offset, noAssert) {
+ offset = offset >>> 0;
+ if (!noAssert) checkOffset(offset, 1, this.length);
+ if (!(this[offset] & 0x80)) return (this[offset])
+ return ((0xff - this[offset] + 1) * -1)
+};
+
+Buffer.prototype.readInt16LE = function readInt16LE (offset, noAssert) {
+ offset = offset >>> 0;
+ if (!noAssert) checkOffset(offset, 2, this.length);
+ var val = this[offset] | (this[offset + 1] << 8);
+ return (val & 0x8000) ? val | 0xFFFF0000 : val
+};
+
+Buffer.prototype.readInt16BE = function readInt16BE (offset, noAssert) {
+ offset = offset >>> 0;
+ if (!noAssert) checkOffset(offset, 2, this.length);
+ var val = this[offset + 1] | (this[offset] << 8);
+ return (val & 0x8000) ? val | 0xFFFF0000 : val
+};
+
+Buffer.prototype.readInt32LE = function readInt32LE (offset, noAssert) {
+ offset = offset >>> 0;
+ if (!noAssert) checkOffset(offset, 4, this.length);
+
+ return (this[offset]) |
+ (this[offset + 1] << 8) |
+ (this[offset + 2] << 16) |
+ (this[offset + 3] << 24)
+};
+
+Buffer.prototype.readInt32BE = function readInt32BE (offset, noAssert) {
+ offset = offset >>> 0;
+ if (!noAssert) checkOffset(offset, 4, this.length);
+
+ return (this[offset] << 24) |
+ (this[offset + 1] << 16) |
+ (this[offset + 2] << 8) |
+ (this[offset + 3])
+};
+
+Buffer.prototype.readFloatLE = function readFloatLE (offset, noAssert) {
+ offset = offset >>> 0;
+ if (!noAssert) checkOffset(offset, 4, this.length);
+ return ieee754.read(this, offset, true, 23, 4)
+};
+
+Buffer.prototype.readFloatBE = function readFloatBE (offset, noAssert) {
+ offset = offset >>> 0;
+ if (!noAssert) checkOffset(offset, 4, this.length);
+ return ieee754.read(this, offset, false, 23, 4)
+};
+
+Buffer.prototype.readDoubleLE = function readDoubleLE (offset, noAssert) {
+ offset = offset >>> 0;
+ if (!noAssert) checkOffset(offset, 8, this.length);
+ return ieee754.read(this, offset, true, 52, 8)
+};
+
+Buffer.prototype.readDoubleBE = function readDoubleBE (offset, noAssert) {
+ offset = offset >>> 0;
+ if (!noAssert) checkOffset(offset, 8, this.length);
+ return ieee754.read(this, offset, false, 52, 8)
+};
+
+function checkInt (buf, value, offset, ext, max, min) {
+ if (!Buffer.isBuffer(buf)) throw new TypeError('"buffer" argument must be a Buffer instance')
+ if (value > max || value < min) throw new RangeError('"value" argument is out of bounds')
+ if (offset + ext > buf.length) throw new RangeError('Index out of range')
+}
+
+Buffer.prototype.writeUIntLE = function writeUIntLE (value, offset, byteLength, noAssert) {
+ value = +value;
+ offset = offset >>> 0;
+ byteLength = byteLength >>> 0;
+ if (!noAssert) {
+ var maxBytes = Math.pow(2, 8 * byteLength) - 1;
+ checkInt(this, value, offset, byteLength, maxBytes, 0);
+ }
+
+ var mul = 1;
+ var i = 0;
+ this[offset] = value & 0xFF;
+ while (++i < byteLength && (mul *= 0x100)) {
+ this[offset + i] = (value / mul) & 0xFF;
+ }
+
+ return offset + byteLength
+};
+
+Buffer.prototype.writeUIntBE = function writeUIntBE (value, offset, byteLength, noAssert) {
+ value = +value;
+ offset = offset >>> 0;
+ byteLength = byteLength >>> 0;
+ if (!noAssert) {
+ var maxBytes = Math.pow(2, 8 * byteLength) - 1;
+ checkInt(this, value, offset, byteLength, maxBytes, 0);
+ }
+
+ var i = byteLength - 1;
+ var mul = 1;
+ this[offset + i] = value & 0xFF;
+ while (--i >= 0 && (mul *= 0x100)) {
+ this[offset + i] = (value / mul) & 0xFF;
+ }
+
+ return offset + byteLength
+};
+
+Buffer.prototype.writeUInt8 = function writeUInt8 (value, offset, noAssert) {
+ value = +value;
+ offset = offset >>> 0;
+ if (!noAssert) checkInt(this, value, offset, 1, 0xff, 0);
+ this[offset] = (value & 0xff);
+ return offset + 1
+};
+
+Buffer.prototype.writeUInt16LE = function writeUInt16LE (value, offset, noAssert) {
+ value = +value;
+ offset = offset >>> 0;
+ if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0);
+ this[offset] = (value & 0xff);
+ this[offset + 1] = (value >>> 8);
+ return offset + 2
+};
+
+Buffer.prototype.writeUInt16BE = function writeUInt16BE (value, offset, noAssert) {
+ value = +value;
+ offset = offset >>> 0;
+ if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0);
+ this[offset] = (value >>> 8);
+ this[offset + 1] = (value & 0xff);
+ return offset + 2
+};
+
+Buffer.prototype.writeUInt32LE = function writeUInt32LE (value, offset, noAssert) {
+ value = +value;
+ offset = offset >>> 0;
+ if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0);
+ this[offset + 3] = (value >>> 24);
+ this[offset + 2] = (value >>> 16);
+ this[offset + 1] = (value >>> 8);
+ this[offset] = (value & 0xff);
+ return offset + 4
+};
+
+Buffer.prototype.writeUInt32BE = function writeUInt32BE (value, offset, noAssert) {
+ value = +value;
+ offset = offset >>> 0;
+ if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0);
+ this[offset] = (value >>> 24);
+ this[offset + 1] = (value >>> 16);
+ this[offset + 2] = (value >>> 8);
+ this[offset + 3] = (value & 0xff);
+ return offset + 4
+};
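+
+// Sketch (illustrative): the writers mirror the readers and return the offset
+// just past the written bytes, so successive writes can be chained:
+//   var b = Buffer.alloc(6);
+//   var off = b.writeUInt16BE(0xbeef, 0); // off === 2
+//   b.writeUInt32BE(0xdeadc0de, off);     // b is <Buffer be ef de ad c0 de>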
+
+Buffer.prototype.writeIntLE = function writeIntLE (value, offset, byteLength, noAssert) {
+ value = +value;
+ offset = offset >>> 0;
+ if (!noAssert) {
+ var limit = Math.pow(2, (8 * byteLength) - 1);
+
+ checkInt(this, value, offset, byteLength, limit - 1, -limit);
+ }
+
+ var i = 0;
+ var mul = 1;
+ var sub = 0;
+ this[offset] = value & 0xFF;
+ while (++i < byteLength && (mul *= 0x100)) {
+ if (value < 0 && sub === 0 && this[offset + i - 1] !== 0) {
+ sub = 1;
+ }
+ this[offset + i] = ((value / mul) >> 0) - sub & 0xFF;
+ }
+
+ return offset + byteLength
+};
+
+Buffer.prototype.writeIntBE = function writeIntBE (value, offset, byteLength, noAssert) {
+ value = +value;
+ offset = offset >>> 0;
+ if (!noAssert) {
+ var limit = Math.pow(2, (8 * byteLength) - 1);
+
+ checkInt(this, value, offset, byteLength, limit - 1, -limit);
+ }
+
+ var i = byteLength - 1;
+ var mul = 1;
+ var sub = 0;
+ this[offset + i] = value & 0xFF;
+ while (--i >= 0 && (mul *= 0x100)) {
+ if (value < 0 && sub === 0 && this[offset + i + 1] !== 0) {
+ sub = 1;
+ }
+ this[offset + i] = ((value / mul) >> 0) - sub & 0xFF;
+ }
+
+ return offset + byteLength
+};
+
+Buffer.prototype.writeInt8 = function writeInt8 (value, offset, noAssert) {
+ value = +value;
+ offset = offset >>> 0;
+ if (!noAssert) checkInt(this, value, offset, 1, 0x7f, -0x80);
+ if (value < 0) value = 0xff + value + 1;
+ this[offset] = (value & 0xff);
+ return offset + 1
+};
+
+Buffer.prototype.writeInt16LE = function writeInt16LE (value, offset, noAssert) {
+ value = +value;
+ offset = offset >>> 0;
+ if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000);
+ this[offset] = (value & 0xff);
+ this[offset + 1] = (value >>> 8);
+ return offset + 2
+};
+
+Buffer.prototype.writeInt16BE = function writeInt16BE (value, offset, noAssert) {
+ value = +value;
+ offset = offset >>> 0;
+ if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000);
+ this[offset] = (value >>> 8);
+ this[offset + 1] = (value & 0xff);
+ return offset + 2
+};
+
+Buffer.prototype.writeInt32LE = function writeInt32LE (value, offset, noAssert) {
+ value = +value;
+ offset = offset >>> 0;
+ if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000);
+ this[offset] = (value & 0xff);
+ this[offset + 1] = (value >>> 8);
+ this[offset + 2] = (value >>> 16);
+ this[offset + 3] = (value >>> 24);
+ return offset + 4
+};
+
+Buffer.prototype.writeInt32BE = function writeInt32BE (value, offset, noAssert) {
+ value = +value;
+ offset = offset >>> 0;
+ if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000);
+ if (value < 0) value = 0xffffffff + value + 1;
+ this[offset] = (value >>> 24);
+ this[offset + 1] = (value >>> 16);
+ this[offset + 2] = (value >>> 8);
+ this[offset + 3] = (value & 0xff);
+ return offset + 4
+};
+
+function checkIEEE754 (buf, value, offset, ext, max, min) {
+ if (offset + ext > buf.length) throw new RangeError('Index out of range')
+ if (offset < 0) throw new RangeError('Index out of range')
+}
+
+function writeFloat (buf, value, offset, littleEndian, noAssert) {
+ value = +value;
+ offset = offset >>> 0;
+ if (!noAssert) {
+ checkIEEE754(buf, value, offset, 4, 3.4028234663852886e+38, -3.4028234663852886e+38);
+ }
+ ieee754.write(buf, value, offset, littleEndian, 23, 4);
+ return offset + 4
+}
+
+Buffer.prototype.writeFloatLE = function writeFloatLE (value, offset, noAssert) {
+ return writeFloat(this, value, offset, true, noAssert)
+};
+
+Buffer.prototype.writeFloatBE = function writeFloatBE (value, offset, noAssert) {
+ return writeFloat(this, value, offset, false, noAssert)
+};
+
+function writeDouble (buf, value, offset, littleEndian, noAssert) {
+ value = +value;
+ offset = offset >>> 0;
+ if (!noAssert) {
+ checkIEEE754(buf, value, offset, 8, 1.7976931348623157E+308, -1.7976931348623157E+308);
+ }
+ ieee754.write(buf, value, offset, littleEndian, 52, 8);
+ return offset + 8
+}
+
+Buffer.prototype.writeDoubleLE = function writeDoubleLE (value, offset, noAssert) {
+ return writeDouble(this, value, offset, true, noAssert)
+};
+
+Buffer.prototype.writeDoubleBE = function writeDoubleBE (value, offset, noAssert) {
+ return writeDouble(this, value, offset, false, noAssert)
+};
+
+// copy(targetBuffer, targetStart=0, sourceStart=0, sourceEnd=buffer.length)
+Buffer.prototype.copy = function copy (target, targetStart, start, end) {
+ if (!start) start = 0;
+ if (!end && end !== 0) end = this.length;
+ if (targetStart >= target.length) targetStart = target.length;
+ if (!targetStart) targetStart = 0;
+ if (end > 0 && end < start) end = start;
+
+ // Copy 0 bytes; we're done
+ if (end === start) return 0
+ if (target.length === 0 || this.length === 0) return 0
+
+ // Fatal error conditions
+ if (targetStart < 0) {
+ throw new RangeError('targetStart out of bounds')
+ }
+ if (start < 0 || start >= this.length) throw new RangeError('sourceStart out of bounds')
+ if (end < 0) throw new RangeError('sourceEnd out of bounds')
+
+ // Are we oob?
+ if (end > this.length) end = this.length;
+ if (target.length - targetStart < end - start) {
+ end = target.length - targetStart + start;
+ }
+
+ var len = end - start;
+ var i;
+
+ if (this === target && start < targetStart && targetStart < end) {
+ // descending copy from end
+ for (i = len - 1; i >= 0; --i) {
+ target[i + targetStart] = this[i + start];
+ }
+ } else if (len < 1000) {
+ // ascending copy from start
+ for (i = 0; i < len; ++i) {
+ target[i + targetStart] = this[i + start];
+ }
+ } else {
+ Uint8Array.prototype.set.call(
+ target,
+ this.subarray(start, start + len),
+ targetStart
+ );
+ }
+
+ return len
+};
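+
+// Sketch (illustrative): copy() clamps the range to whatever fits in the
+// target and returns the number of bytes actually copied:
+//   var src = Buffer.from([1, 2, 3, 4]);
+//   var dst = Buffer.alloc(2);
+//   src.copy(dst); // -> 2, dst is <Buffer 01 02>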
+
+// Usage:
+// buffer.fill(number[, offset[, end]])
+// buffer.fill(buffer[, offset[, end]])
+// buffer.fill(string[, offset[, end]][, encoding])
+Buffer.prototype.fill = function fill (val, start, end, encoding) {
+ // Handle string cases:
+ if (typeof val === 'string') {
+ if (typeof start === 'string') {
+ encoding = start;
+ start = 0;
+ end = this.length;
+ } else if (typeof end === 'string') {
+ encoding = end;
+ end = this.length;
+ }
+ if (val.length === 1) {
+ var code = val.charCodeAt(0);
+ if (code < 256) {
+ val = code;
+ }
+ }
+ if (encoding !== undefined && typeof encoding !== 'string') {
+ throw new TypeError('encoding must be a string')
+ }
+ if (typeof encoding === 'string' && !Buffer.isEncoding(encoding)) {
+ throw new TypeError('Unknown encoding: ' + encoding)
+ }
+ } else if (typeof val === 'number') {
+ val = val & 255;
+ }
+
+ // Invalid ranges are not set to a default, so can range check early.
+ if (start < 0 || this.length < start || this.length < end) {
+ throw new RangeError('Out of range index')
+ }
+
+ if (end <= start) {
+ return this
+ }
+
+ start = start >>> 0;
+ end = end === undefined ? this.length : end >>> 0;
+
+ if (!val) val = 0;
+
+ var i;
+ if (typeof val === 'number') {
+ for (i = start; i < end; ++i) {
+ this[i] = val;
+ }
+ } else {
+ var bytes = Buffer.isBuffer(val)
+ ? val
+ : new Buffer(val, encoding);
+ var len = bytes.length;
+ for (i = 0; i < end - start; ++i) {
+ this[i + start] = bytes[i % len];
+ }
+ }
+
+ return this
+};
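+
+// Sketch (illustrative): fill() repeats a byte, string, or buffer across the
+// requested range and returns `this`:
+//   Buffer.alloc(4).fill(0x61); // <Buffer 61 61 61 61>
+//   Buffer.alloc(4).fill('ab'); // <Buffer 61 62 61 62>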
+
+// HELPER FUNCTIONS
+// ================
+
+var INVALID_BASE64_RE = /[^+/0-9A-Za-z-_]/g;
+
+function base64clean (str) {
+ // Node strips out invalid characters like \n and \t from the string, base64-js does not
+ str = str.trim().replace(INVALID_BASE64_RE, '');
+ // Node converts strings with length < 2 to ''
+ if (str.length < 2) return ''
+ // Node allows for non-padded base64 strings (missing trailing ===), base64-js does not
+ while (str.length % 4 !== 0) {
+ str = str + '=';
+ }
+ return str
+}
+
+function toHex (n) {
+ if (n < 16) return '0' + n.toString(16)
+ return n.toString(16)
+}
+
+function utf8ToBytes (string, units) {
+ units = units || Infinity;
+ var codePoint;
+ var length = string.length;
+ var leadSurrogate = null;
+ var bytes = [];
+
+ for (var i = 0; i < length; ++i) {
+ codePoint = string.charCodeAt(i);
+
+ // is surrogate component
+ if (codePoint > 0xD7FF && codePoint < 0xE000) {
+ // last char was a lead
+ if (!leadSurrogate) {
+ // no lead yet
+ if (codePoint > 0xDBFF) {
+ // unexpected trail
+ if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD);
+ continue
+ } else if (i + 1 === length) {
+ // unpaired lead
+ if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD);
+ continue
+ }
+
+ // valid lead
+ leadSurrogate = codePoint;
+
+ continue
+ }
+
+ // 2 leads in a row
+ if (codePoint < 0xDC00) {
+ if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD);
+ leadSurrogate = codePoint;
+ continue
+ }
+
+ // valid surrogate pair
+ codePoint = (leadSurrogate - 0xD800 << 10 | codePoint - 0xDC00) + 0x10000;
+ } else if (leadSurrogate) {
+ // valid bmp char, but last char was a lead
+ if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD);
+ }
+
+ leadSurrogate = null;
+
+ // encode utf8
+ if (codePoint < 0x80) {
+ if ((units -= 1) < 0) break
+ bytes.push(codePoint);
+ } else if (codePoint < 0x800) {
+ if ((units -= 2) < 0) break
+ bytes.push(
+ codePoint >> 0x6 | 0xC0,
+ codePoint & 0x3F | 0x80
+ );
+ } else if (codePoint < 0x10000) {
+ if ((units -= 3) < 0) break
+ bytes.push(
+ codePoint >> 0xC | 0xE0,
+ codePoint >> 0x6 & 0x3F | 0x80,
+ codePoint & 0x3F | 0x80
+ );
+ } else if (codePoint < 0x110000) {
+ if ((units -= 4) < 0) break
+ bytes.push(
+ codePoint >> 0x12 | 0xF0,
+ codePoint >> 0xC & 0x3F | 0x80,
+ codePoint >> 0x6 & 0x3F | 0x80,
+ codePoint & 0x3F | 0x80
+ );
+ } else {
+ throw new Error('Invalid code point')
+ }
+ }
+
+ return bytes
+}
+
+function asciiToBytes (str) {
+ var byteArray = [];
+ for (var i = 0; i < str.length; ++i) {
+ // Node's code seems to be doing this and not & 0x7F..
+ byteArray.push(str.charCodeAt(i) & 0xFF);
+ }
+ return byteArray
+}
+
+function utf16leToBytes (str, units) {
+ var c, hi, lo;
+ var byteArray = [];
+ for (var i = 0; i < str.length; ++i) {
+ if ((units -= 2) < 0) break
+
+ c = str.charCodeAt(i);
+ hi = c >> 8;
+ lo = c % 256;
+ byteArray.push(lo);
+ byteArray.push(hi);
+ }
+
+ return byteArray
+}
+
+function base64ToBytes (str) {
+ return base64Js.toByteArray(base64clean(str))
+}
+
+function blitBuffer (src, dst, offset, length) {
+ for (var i = 0; i < length; ++i) {
+ if ((i + offset >= dst.length) || (i >= src.length)) break
+ dst[i + offset] = src[i];
+ }
+ return i
+}
+
+// ArrayBuffers from another context (i.e. an iframe) do not pass the `instanceof` check
+// but they should be treated as valid. See: https://github.com/feross/buffer/issues/166
+function isArrayBuffer (obj) {
+ return obj instanceof ArrayBuffer ||
+ (obj != null && obj.constructor != null && obj.constructor.name === 'ArrayBuffer' &&
+ typeof obj.byteLength === 'number')
+}
+
+// Node 0.10 supports `ArrayBuffer` but lacks `ArrayBuffer.isView`
+function isArrayBufferView (obj) {
+ return (typeof ArrayBuffer.isView === 'function') && ArrayBuffer.isView(obj)
+}
+
+function numberIsNaN (obj) {
+ return obj !== obj // eslint-disable-line no-self-compare
+}
+});
+
+var buffer_1 = buffer.Buffer;
+
+function parseBodyToType$2(res) {
+ var clone = res.clone();
+ return new Promise(function (resolve) {
+ res.json().then(function (data) {
+ return resolve(data);
+ }).catch(function () {
+ return clone.text().then(function (data) {
+ return resolve(data);
+ });
+ });
+ }).then(function (data) {
+ return [res, data];
+ });
+}
+
+function rpcRequest(path, body, auth, host, accessToken, options) {
+ var fetchOptions = {
+ method: 'POST',
+ body: body ? JSON.stringify(body) : null
+ };
+ var headers = {};
+ if (body) {
+ headers['Content-Type'] = 'application/json';
+ }
+ var authHeader = '';
+
+ switch (auth) {
+ case 'app':
+ if (!options.clientId || !options.clientSecret) {
+ throw new Error('A client id and secret are required for this function');
+ }
+ authHeader = new buffer_1(options.clientId + ':' + options.clientSecret).toString('base64');
+ headers.Authorization = 'Basic ' + authHeader;
+ break;
+ case 'team':
+ case 'user':
+ headers.Authorization = 'Bearer ' + accessToken;
+ break;
+ case 'noauth':
+ break;
+ default:
+ throw new Error('Unhandled auth type: ' + auth);
+ }
+
+ if (options) {
+ if (options.selectUser) {
+ headers['Dropbox-API-Select-User'] = options.selectUser;
+ }
+ if (options.selectAdmin) {
+ headers['Dropbox-API-Select-Admin'] = options.selectAdmin;
+ }
+ }
+
+ fetchOptions.headers = headers;
+ return fetch(getBaseURL(host) + path, fetchOptions).then(function (res) {
+ return parseBodyToType$2(res);
+ }).then(function (_ref) {
+ var _ref2 = slicedToArray(_ref, 2),
+ res = _ref2[0],
+ data = _ref2[1];
+
+ // maintaining existing API for error codes not equal to 200 range
+ if (!res.ok) {
+ // eslint-disable-next-line no-throw-literal
+ throw {
+ error: data,
+ response: res,
+ status: res.status
+ };
+ }
+
+ return data;
+ });
+}
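+
+// Sketch (illustrative): rpcRequest() is what DropboxBase#request delegates to
+// for RPC-style endpoints. Assuming getBaseURL('api') resolves to the Dropbox
+// API host (defined earlier in this bundle) and ACCESS_TOKEN is a valid user
+// token, a call looks like:
+//   rpcRequest('files/list_folder', {path: ''}, 'user', 'api', ACCESS_TOKEN, {})
+//     .then(function (result) { console.log(result.entries.length); });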
+
+/* eslint-disable */
+// Polyfill object.assign for legacy browsers
+// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/assign
+if (typeof Object.assign !== 'function') {
+ (function () {
+ Object.assign = function (target) {
+ 'use strict';
+
+ var output;
+ var index;
+ var source;
+ var nextKey;
+ if (target === undefined || target === null) {
+ throw new TypeError('Cannot convert undefined or null to object');
+ }
+
+ output = Object(target);
+ for (index = 1; index < arguments.length; index++) {
+ source = arguments[index];
+ if (source !== undefined && source !== null) {
+ for (nextKey in source) {
+ if (source.hasOwnProperty(nextKey)) {
+ output[nextKey] = source[nextKey];
+ }
+ }
+ }
+ }
+ return output;
+ };
+ })();
+}
+
+// Polyfill Array.includes for legacy browsers
+// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/includes
+// https://tc39.github.io/ecma262/#sec-array.prototype.includes
+if (!Array.prototype.includes) {
+ Object.defineProperty(Array.prototype, 'includes', {
+ value: function value(searchElement, fromIndex) {
+
+ if (this == null) {
+ throw new TypeError('"this" is null or not defined');
+ }
+
+ // 1. Let O be ? ToObject(this value).
+ var o = Object(this);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ var len = o.length >>> 0;
+
+ // 3. If len is 0, return false.
+ if (len === 0) {
+ return false;
+ }
+
+ // 4. Let n be ? ToInteger(fromIndex).
+ // (If fromIndex is undefined, this step produces the value 0.)
+ var n = fromIndex | 0;
+
+ // 5. If n ≥ 0, then
+ // a. Let k be n.
+ // 6. Else n < 0,
+ // a. Let k be len + n.
+ // b. If k < 0, let k be 0.
+ var k = Math.max(n >= 0 ? n : len - Math.abs(n), 0);
+
+ function sameValueZero(x, y) {
+ return x === y || typeof x === 'number' && typeof y === 'number' && isNaN(x) && isNaN(y);
+ }
+
+ // 7. Repeat, while k < len
+ while (k < len) {
+ // a. Let elementK be the result of ? Get(O, ! ToString(k)).
+ // b. If SameValueZero(searchElement, elementK) is true, return true.
+ if (sameValueZero(o[k], searchElement)) {
+ return true;
+ }
+ // c. Increase k by 1.
+ k++;
+ }
+
+ // 8. Return false
+ return false;
+ }
+ });
+}
+/* eslint-enable */
+
+/**
+ * @private
+ * @class DropboxBase
+ * @classdesc The main Dropbox SDK class. This contains the methods that are
+ * shared between Dropbox and DropboxTeam classes. It is marked as private so
+ * that it doesn't show up in the docs because it is never used directly.
+ * @arg {Object} options
+ * @arg {String} [options.accessToken] - An access token for making authenticated
+ * requests.
+ * @arg {String} [options.clientId] - The client id for your app. Used to create
+ * authentication URL.
+ * @arg {String} [options.clientSecret] - The client secret for your app.
+ * @arg {Number} [options.selectUser] - User that the team access token would like
+ * to act as.
+ * @arg {String} [options.selectAdmin] - Team admin that the team access token would like
+ * to act as.
+ */
+
+function parseBodyToType(res) {
+ var clone = res.clone();
+ return new Promise(function (resolve) {
+ res.json().then(function (data) {
+ return resolve(data);
+ }).catch(function () {
+ return clone.text().then(function (data) {
+ return resolve(data);
+ });
+ });
+ }).then(function (data) {
+ return [res, data];
+ });
+}
+
+var DropboxBase = function () {
+ function DropboxBase(options) {
+ classCallCheck(this, DropboxBase);
+
+ options = options || {};
+ this.accessToken = options.accessToken;
+ this.clientId = options.clientId;
+ this.clientSecret = options.clientSecret;
+ this.selectUser = options.selectUser;
+ this.selectAdmin = options.selectAdmin;
+ }
+
+ /**
+ * Set the access token used to authenticate requests to the API.
+ * @arg {String} accessToken - An access token
+ * @returns {undefined}
+ */
+
+
+ createClass(DropboxBase, [{
+ key: 'setAccessToken',
+ value: function setAccessToken(accessToken) {
+ this.accessToken = accessToken;
+ }
+
+ /**
+ * Get the access token
+ * @returns {String} Access token
+ */
+
+ }, {
+ key: 'getAccessToken',
+ value: function getAccessToken() {
+ return this.accessToken;
+ }
+
+ /**
+ * Set the client id, which is used to help gain an access token.
+ * @arg {String} clientId - Your apps client id
+ * @returns {undefined}
+ */
+
+ }, {
+ key: 'setClientId',
+ value: function setClientId(clientId) {
+ this.clientId = clientId;
+ }
+
+ /**
+ * Get the client id
+ * @returns {String} Client id
+ */
+
+ }, {
+ key: 'getClientId',
+ value: function getClientId() {
+ return this.clientId;
+ }
+
+ /**
+ * Set the client secret
+ * @arg {String} clientSecret - Your app's client secret
+ * @returns {undefined}
+ */
+
+ }, {
+ key: 'setClientSecret',
+ value: function setClientSecret(clientSecret) {
+ this.clientSecret = clientSecret;
+ }
+
+ /**
+ * Get the client secret
+ * @returns {String} Client secret
+ */
+
+ }, {
+ key: 'getClientSecret',
+ value: function getClientSecret() {
+ return this.clientSecret;
+ }
+
+ /**
+ * Get a URL that can be used to authenticate users for the Dropbox API.
+ * @arg {String} redirectUri - A URL to redirect the user to after
+ * authenticating. This must be added to your app through the admin interface.
+ * @arg {String} [state] - State that will be returned in the redirect URL to help
+ * prevent cross-site request forgery (CSRF) attacks.
+ * @arg {String} [authType] - auth type, defaults to 'token', other option is 'code'
+ * @returns {String} Url to send user to for Dropbox API authentication
+ */
+
+ }, {
+ key: 'getAuthenticationUrl',
+ value: function getAuthenticationUrl(redirectUri, state) {
+ var authType = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : 'token';
+
+ var clientId = this.getClientId();
+ var baseUrl = 'https://www.dropbox.com/oauth2/authorize';
+
+ if (!clientId) {
+ throw new Error('A client id is required. You can set the client id using .setClientId().');
+ }
+ if (authType !== 'code' && !redirectUri) {
+ throw new Error('A redirect uri is required.');
+ }
+ if (!['code', 'token'].includes(authType)) {
+ throw new Error('Authorization type must be code or token');
+ }
+
+ var authUrl = void 0;
+ if (authType === 'code') {
+ authUrl = baseUrl + '?response_type=code&client_id=' + clientId;
+ } else {
+ authUrl = baseUrl + '?response_type=token&client_id=' + clientId;
+ }
+
+ if (redirectUri) {
+ authUrl += '&redirect_uri=' + redirectUri;
+ }
+ if (state) {
+ authUrl += '&state=' + state;
+ }
+ return authUrl;
+ }
+
+ /**
+ * Get an OAuth2 access token from an OAuth2 Code.
+ * @arg {String} redirectUri - A URL to redirect the user to after
+ * authenticating. This must be added to your app through the admin interface.
+ * @arg {String} code - An OAuth2 code.
+ */
+
+ }, {
+ key: 'getAccessTokenFromCode',
+ value: function getAccessTokenFromCode(redirectUri, code) {
+ var clientId = this.getClientId();
+ var clientSecret = this.getClientSecret();
+
+ if (!clientId) {
+ throw new Error('A client id is required. You can set the client id using .setClientId().');
+ }
+ if (!clientSecret) {
+ throw new Error('A client secret is required. You can set the client secret using .setClientSecret().');
+ }
+ var path = 'https://api.dropboxapi.com/oauth2/token?code=' + code + '&grant_type=authorization_code&redirect_uri=' + redirectUri + '&client_id=' + clientId + '&client_secret=' + clientSecret;
+
+ var fetchOptions = {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/x-www-form-urlencoded'
+ }
+ };
+
+ return fetch(path, fetchOptions).then(function (res) {
+ return parseBodyToType(res);
+ }).then(function (_ref) {
+ var _ref2 = slicedToArray(_ref, 2),
+ res = _ref2[0],
+ data = _ref2[1];
+
+ // maintaining existing API for error codes not equal to 200 range
+ if (!res.ok) {
+ // eslint-disable-next-line no-throw-literal
+ throw {
+ error: data,
+ response: res,
+ status: res.status
+ };
+ }
+ return data.access_token;
+ });
+ }
+
+ /**
+ * Called when the authentication succeeds.
+ * @callback successCallback
+ * @param {string} access_token The application's access token
+ */
+
+ /**
+ * Called when the authentication fails.
+ * @callback errorCallback
+ */
+
+ /**
+ * An authentication process that works with Cordova applications.
+ * @param {successCallback} successCallback
+ * @param {errorCallback} errorCallback
+ */
+
+ }, {
+ key: 'authenticateWithCordova',
+ value: function authenticateWithCordova(successCallback, errorCallback) {
+ var redirectUrl = 'https://www.dropbox.com/1/oauth2/redirect_receiver';
+ var url = this.getAuthenticationUrl(redirectUrl);
+
+ var removed = false;
+ var browser = window.open(url, '_blank');
+
+ function onLoadError() {
+ // Try to avoid a browser crash on browser.close().
+ window.setTimeout(function () {
+ browser.close();
+ }, 10);
+ errorCallback();
+ }
+
+ function onLoadStop(event) {
+ var errorLabel = '&error=';
+ var errorIndex = event.url.indexOf(errorLabel);
+
+ if (errorIndex > -1) {
+ // Try to avoid a browser crash on browser.close().
+ window.setTimeout(function () {
+ browser.close();
+ }, 10);
+ errorCallback();
+ } else {
+ var tokenLabel = '#access_token=';
+ var tokenIndex = event.url.indexOf(tokenLabel);
+ var tokenTypeIndex = event.url.indexOf('&token_type=');
+ if (tokenIndex > -1) {
+ tokenIndex += tokenLabel.length;
+ // Try to avoid a browser crash on browser.close().
+ window.setTimeout(function () {
+ browser.close();
+ }, 10);
+
+ var accessToken = event.url.substring(tokenIndex, tokenTypeIndex);
+ successCallback(accessToken);
+ }
+ }
+ }
+
+ function onExit() {
+ if (removed) {
+ return;
+ }
+ browser.removeEventListener('loaderror', onLoadError);
+ browser.removeEventListener('loadstop', onLoadStop);
+ browser.removeEventListener('exit', onExit);
+ removed = true;
+ }
+
+ browser.addEventListener('loaderror', onLoadError);
+ browser.addEventListener('loadstop', onLoadStop);
+ browser.addEventListener('exit', onExit);
+ }
+ }, {
+ key: 'request',
+ value: function request(path, args, auth, host, style) {
+ var request = null;
+ switch (style) {
+ case RPC:
+ request = this.getRpcRequest();
+ break;
+ case DOWNLOAD:
+ request = this.getDownloadRequest();
+ break;
+ case UPLOAD:
+ request = this.getUploadRequest();
+ break;
+ default:
+ throw new Error('Invalid request style: ' + style);
+ }
+ var options = {
+ selectUser: this.selectUser,
+ selectAdmin: this.selectAdmin,
+ clientId: this.getClientId(),
+ clientSecret: this.getClientSecret()
+ };
+ return request(path, args, auth, host, this.getAccessToken(), options);
+ }
+ }, {
+ key: 'setRpcRequest',
+ value: function setRpcRequest(newRpcRequest) {
+ this.rpcRequest = newRpcRequest;
+ }
+ }, {
+ key: 'getRpcRequest',
+ value: function getRpcRequest() {
+ if (this.rpcRequest === undefined) {
+ this.rpcRequest = rpcRequest;
+ }
+ return this.rpcRequest;
+ }
+ }, {
+ key: 'setDownloadRequest',
+ value: function setDownloadRequest(newDownloadRequest) {
+ this.downloadRequest = newDownloadRequest;
+ }
+ }, {
+ key: 'getDownloadRequest',
+ value: function getDownloadRequest() {
+ if (this.downloadRequest === undefined) {
+ this.downloadRequest = downloadRequest;
+ }
+ return this.downloadRequest;
+ }
+ }, {
+ key: 'setUploadRequest',
+ value: function setUploadRequest(newUploadRequest) {
+ this.uploadRequest = newUploadRequest;
+ }
+ }, {
+ key: 'getUploadRequest',
+ value: function getUploadRequest() {
+ if (this.uploadRequest === undefined) {
+ this.uploadRequest = uploadRequest;
+ }
+ return this.uploadRequest;
+ }
+ }]);
+ return DropboxBase;
+}();
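+
+// Sketch (illustrative, hypothetical wiring that is not part of the SDK): a
+// browser extension can pair getAuthenticationUrl() (on the public Dropbox
+// subclass defined below) with chrome.identity and read the access token from
+// the redirect fragment:
+//   var dbx = new Dropbox({clientId: 'YOUR_APP_KEY'}); // placeholder app key
+//   var url = dbx.getAuthenticationUrl(chrome.identity.getRedirectURL());
+//   chrome.identity.launchWebAuthFlow({url: url, interactive: true}, function (redirect) {
+//     var token = new URLSearchParams(redirect.split('#')[1]).get('access_token');
+//     dbx.setAccessToken(token);
+//   });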
+
+/**
+ * @class Dropbox
+ * @extends DropboxBase
+ * @classdesc The Dropbox SDK class that provides methods to read, write and
+ * create files or folders in a user's Dropbox.
+ * @arg {Object} options
+ * @arg {String} [options.accessToken] - An access token for making authenticated
+ * requests.
+ * @arg {String} [options.clientId] - The client id for your app. Used to create
+ * authentication URL.
+ * @arg {String} [options.selectUser] - Select user is only used by DropboxTeam.
+ * It specifies which user the team access token should be acting as.
+ */
+var Dropbox = function (_DropboxBase) {
+ inherits(Dropbox, _DropboxBase);
+
+ function Dropbox(options) {
+ classCallCheck(this, Dropbox);
+
+ var _this = possibleConstructorReturn(this, (Dropbox.__proto__ || Object.getPrototypeOf(Dropbox)).call(this, options));
+
+ Object.assign(_this, routes);
+ return _this;
+ }
+
+ createClass(Dropbox, [{
+ key: 'filesGetSharedLinkFile',
+ value: function filesGetSharedLinkFile(arg) {
+ return this.request('sharing/get_shared_link_file', arg, 'api', 'download');
+ }
+ }]);
+ return Dropbox;
+}(DropboxBase);
+
+
+
+var dropbox = Object.freeze({
+ Dropbox: Dropbox
+});
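+
+// Sketch (illustrative): with an access token in hand, the route methods mixed
+// in from `routes` (generated elsewhere in this bundle; filesUpload is shown
+// here as it exists in the upstream SDK) are called directly on the instance:
+//   var dbx = new Dropbox({accessToken: token});
+//   dbx.filesUpload({path: '/styles.zip', contents: blob, mode: 'overwrite'})
+//     .then(function (meta) { console.log('uploaded', meta.name); });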
+
+// Auto-generated by Stone, do not modify.
+var routes$1 = {};
+
+/**
+ * List all device sessions of a team's member.
+ * @function DropboxTeam#teamDevicesListMemberDevices
+ * @arg {TeamListMemberDevicesArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamDevicesListMemberDevices = function (arg) {
+ return this.request('team/devices/list_member_devices', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * List all device sessions of a team.
+ * @function DropboxTeam#teamDevicesListMembersDevices
+ * @arg {TeamListMembersDevicesArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamDevicesListMembersDevices = function (arg) {
+ return this.request('team/devices/list_members_devices', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * List all device sessions of a team.
+ * @function DropboxTeam#teamDevicesListTeamDevices
+ * @deprecated
+ * @arg {TeamListTeamDevicesArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamDevicesListTeamDevices = function (arg) {
+ return this.request('team/devices/list_team_devices', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Revoke a device session of a team's member.
+ * @function DropboxTeam#teamDevicesRevokeDeviceSession
+ * @arg {TeamRevokeDeviceSessionArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamDevicesRevokeDeviceSession = function (arg) {
+ return this.request('team/devices/revoke_device_session', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Revoke a list of device sessions of team members.
+ * @function DropboxTeam#teamDevicesRevokeDeviceSessionBatch
+ * @arg {TeamRevokeDeviceSessionBatchArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamDevicesRevokeDeviceSessionBatch = function (arg) {
+ return this.request('team/devices/revoke_device_session_batch', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Get the values for one or more features. This route allows you to check your
+ * account's capability for what feature you can access or what value you have
+ * for certain features. Permission : Team information.
+ * @function DropboxTeam#teamFeaturesGetValues
+ * @arg {TeamFeaturesGetValuesBatchArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamFeaturesGetValues = function (arg) {
+ return this.request('team/features/get_values', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Retrieves information about a team.
+ * @function DropboxTeam#teamGetInfo
+ * @arg {void} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamGetInfo = function (arg) {
+ return this.request('team/get_info', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Creates a new, empty group, with a requested name. Permission : Team member
+ * management.
+ * @function DropboxTeam#teamGroupsCreate
+ * @arg {TeamGroupCreateArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamGroupsCreate = function (arg) {
+ return this.request('team/groups/create', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Deletes a group. The group is deleted immediately. However the revoking of
+ * group-owned resources may take additional time. Use the groups/job_status/get
+ * to determine whether this process has completed. Permission : Team member
+ * management.
+ * @function DropboxTeam#teamGroupsDelete
+ * @arg {TeamGroupSelector} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamGroupsDelete = function (arg) {
+ return this.request('team/groups/delete', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Retrieves information about one or more groups. Note that the optional field
+ * GroupFullInfo.members is not returned for system-managed groups. Permission :
+ * Team Information.
+ * @function DropboxTeam#teamGroupsGetInfo
+ * @arg {TeamGroupsSelector} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamGroupsGetInfo = function (arg) {
+ return this.request('team/groups/get_info', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Once an async_job_id is returned from groups/delete, groups/members/add , or
+ * groups/members/remove use this method to poll the status of granting/revoking
+ * group members' access to group-owned resources. Permission : Team member
+ * management.
+ * @function DropboxTeam#teamGroupsJobStatusGet
+ * @arg {AsyncPollArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamGroupsJobStatusGet = function (arg) {
+ return this.request('team/groups/job_status/get', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Lists groups on a team. Permission : Team Information.
+ * @function DropboxTeam#teamGroupsList
+ * @arg {TeamGroupsListArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamGroupsList = function (arg) {
+ return this.request('team/groups/list', arg, 'team', 'api', 'rpc');
+};
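+
+// Sketch (illustrative): every generated route in this section is a thin
+// wrapper over DropboxBase#request. On a DropboxTeam instance (defined further
+// down in this bundle) a call would look like:
+//   dbxTeam.teamGroupsList({limit: 100})
+//     .then(function (res) { res.groups.forEach(function (g) { console.log(g.group_name); }); });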
+
+/**
+ * Once a cursor has been retrieved from groups/list, use this to paginate
+ * through all groups. Permission : Team Information.
+ * @function DropboxTeam#teamGroupsListContinue
+ * @arg {TeamGroupsListContinueArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamGroupsListContinue = function (arg) {
+ return this.request('team/groups/list/continue', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Adds members to a group. The members are added immediately. However the
+ * granting of group-owned resources may take additional time. Use the
+ * groups/job_status/get to determine whether this process has completed.
+ * Permission : Team member management.
+ * @function DropboxTeam#teamGroupsMembersAdd
+ * @arg {TeamGroupMembersAddArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamGroupsMembersAdd = function (arg) {
+ return this.request('team/groups/members/add', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Lists members of a group. Permission : Team Information.
+ * @function DropboxTeam#teamGroupsMembersList
+ * @arg {TeamGroupsMembersListArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamGroupsMembersList = function (arg) {
+ return this.request('team/groups/members/list', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Once a cursor has been retrieved from groups/members/list, use this to
+ * paginate through all members of the group. Permission : Team information.
+ * @function DropboxTeam#teamGroupsMembersListContinue
+ * @arg {TeamGroupsMembersListContinueArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamGroupsMembersListContinue = function (arg) {
+ return this.request('team/groups/members/list/continue', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Removes members from a group. The members are removed immediately. However
+ * the revoking of group-owned resources may take additional time. Use the
+ * groups/job_status/get to determine whether this process has completed. This
+ * method permits removing the only owner of a group, even in cases where this
+ * is not possible via the web client. Permission : Team member management.
+ * @function DropboxTeam#teamGroupsMembersRemove
+ * @arg {TeamGroupMembersRemoveArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamGroupsMembersRemove = function (arg) {
+ return this.request('team/groups/members/remove', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Sets a member's access type in a group. Permission : Team member management.
+ * @function DropboxTeam#teamGroupsMembersSetAccessType
+ * @arg {TeamGroupMembersSetAccessTypeArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamGroupsMembersSetAccessType = function (arg) {
+ return this.request('team/groups/members/set_access_type', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Updates a group's name and/or external ID. Permission : Team member
+ * management.
+ * @function DropboxTeam#teamGroupsUpdate
+ * @arg {TeamGroupUpdateArgs} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamGroupsUpdate = function (arg) {
+ return this.request('team/groups/update', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * List all linked applications of the team member. Note, this endpoint does not
+ * list any team-linked applications.
+ * @function DropboxTeam#teamLinkedAppsListMemberLinkedApps
+ * @arg {TeamListMemberAppsArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamLinkedAppsListMemberLinkedApps = function (arg) {
+ return this.request('team/linked_apps/list_member_linked_apps', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * List all applications linked to the team members' accounts. Note, this
+ * endpoint does not list any team-linked applications.
+ * @function DropboxTeam#teamLinkedAppsListMembersLinkedApps
+ * @arg {TeamListMembersAppsArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamLinkedAppsListMembersLinkedApps = function (arg) {
+ return this.request('team/linked_apps/list_members_linked_apps', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * List all applications linked to the team members' accounts. Note, this
+ * endpoint doesn't list any team-linked applications.
+ * @function DropboxTeam#teamLinkedAppsListTeamLinkedApps
+ * @deprecated
+ * @arg {TeamListTeamAppsArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamLinkedAppsListTeamLinkedApps = function (arg) {
+ return this.request('team/linked_apps/list_team_linked_apps', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Revoke a linked application of the team member.
+ * @function DropboxTeam#teamLinkedAppsRevokeLinkedApp
+ * @arg {TeamRevokeLinkedApiAppArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamLinkedAppsRevokeLinkedApp = function (arg) {
+ return this.request('team/linked_apps/revoke_linked_app', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Revoke a list of linked applications of the team members.
+ * @function DropboxTeam#teamLinkedAppsRevokeLinkedAppBatch
+ * @arg {TeamRevokeLinkedApiAppBatchArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamLinkedAppsRevokeLinkedAppBatch = function (arg) {
+ return this.request('team/linked_apps/revoke_linked_app_batch', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Add users to member space limits excluded users list.
+ * @function DropboxTeam#teamMemberSpaceLimitsExcludedUsersAdd
+ * @arg {TeamExcludedUsersUpdateArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMemberSpaceLimitsExcludedUsersAdd = function (arg) {
+ return this.request('team/member_space_limits/excluded_users/add', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * List member space limits excluded users.
+ * @function DropboxTeam#teamMemberSpaceLimitsExcludedUsersList
+ * @arg {TeamExcludedUsersListArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMemberSpaceLimitsExcludedUsersList = function (arg) {
+ return this.request('team/member_space_limits/excluded_users/list', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Continue listing member space limits excluded users.
+ * @function DropboxTeam#teamMemberSpaceLimitsExcludedUsersListContinue
+ * @arg {TeamExcludedUsersListContinueArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMemberSpaceLimitsExcludedUsersListContinue = function (arg) {
+ return this.request('team/member_space_limits/excluded_users/list/continue', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Remove users from member space limits excluded users list.
+ * @function DropboxTeam#teamMemberSpaceLimitsExcludedUsersRemove
+ * @arg {TeamExcludedUsersUpdateArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMemberSpaceLimitsExcludedUsersRemove = function (arg) {
+ return this.request('team/member_space_limits/excluded_users/remove', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Get users' custom quota. Returns none as the custom quota if none was set. A
+ * maximum of 1000 members can be specified in a single call.
+ * @function DropboxTeam#teamMemberSpaceLimitsGetCustomQuota
+ * @arg {TeamCustomQuotaUsersArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMemberSpaceLimitsGetCustomQuota = function (arg) {
+ return this.request('team/member_space_limits/get_custom_quota', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Remove users' custom quota. A maximum of 1000 members can be specified in a
+ * single call.
+ * @function DropboxTeam#teamMemberSpaceLimitsRemoveCustomQuota
+ * @arg {TeamCustomQuotaUsersArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMemberSpaceLimitsRemoveCustomQuota = function (arg) {
+ return this.request('team/member_space_limits/remove_custom_quota', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Set users' custom quota. Custom quota has to be at least 15GB. A maximum of
+ * 1000 members can be specified in a single call.
+ * @function DropboxTeam#teamMemberSpaceLimitsSetCustomQuota
+ * @arg {TeamSetCustomQuotaArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMemberSpaceLimitsSetCustomQuota = function (arg) {
+ return this.request('team/member_space_limits/set_custom_quota', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Adds members to a team. Permission : Team member management A maximum of 20
+ * members can be specified in a single call. If no Dropbox account exists with
+ * the email address specified, a new Dropbox account will be created with the
+ * given email address, and that account will be invited to the team. If a
+ * personal Dropbox account exists with the email address specified in the call,
+ * this call will create a placeholder Dropbox account for the user on the team
+ * and send an email inviting the user to migrate their existing personal
+ * account onto the team. Team member management apps are required to set an
+ * initial given_name and surname for a user to use in the team invitation and
+ * for 'Perform as team member' actions taken on the user before they become
+ * 'active'.
+ * @function DropboxTeam#teamMembersAdd
+ * @arg {TeamMembersAddArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMembersAdd = function (arg) {
+ return this.request('team/members/add', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Once an async_job_id is returned from members/add , use this to poll the
+ * status of the asynchronous request. Permission : Team member management.
+ * @function DropboxTeam#teamMembersAddJobStatusGet
+ * @arg {AsyncPollArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMembersAddJobStatusGet = function (arg) {
+ return this.request('team/members/add/job_status/get', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Returns information about multiple team members. Permission : Team
+ * information This endpoint will return MembersGetInfoItem.id_not_found, for
+ * IDs (or emails) that cannot be matched to a valid team member.
+ * @function DropboxTeam#teamMembersGetInfo
+ * @arg {TeamMembersGetInfoArgs} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMembersGetInfo = function (arg) {
+ return this.request('team/members/get_info', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Lists members of a team. Permission : Team information.
+ * @function DropboxTeam#teamMembersList
+ * @arg {TeamMembersListArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMembersList = function (arg) {
+ return this.request('team/members/list', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Once a cursor has been retrieved from members/list, use this to paginate
+ * through all team members. Permission : Team information.
+ * @function DropboxTeam#teamMembersListContinue
+ * @arg {TeamMembersListContinueArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMembersListContinue = function (arg) {
+ return this.request('team/members/list/continue', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Recover a deleted member. Permission : Team member management Exactly one of
+ * team_member_id, email, or external_id must be provided to identify the user
+ * account.
+ * @function DropboxTeam#teamMembersRecover
+ * @arg {TeamMembersRecoverArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMembersRecover = function (arg) {
+ return this.request('team/members/recover', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Removes a member from a team. Permission : Team member management Exactly one
+ * of team_member_id, email, or external_id must be provided to identify the
+ * user account. Accounts can be recovered via members/recover for a 7 day
+ * period or until the account has been permanently deleted or transferred to
+ * another account (whichever comes first). Calling members/add while a user is
+ * still recoverable on your team will return with
+ * MemberAddResult.user_already_on_team. Accounts can have their files
+ * transferred via the admin console for a limited time, based on the version
+ * history length associated with the team (120 days for most teams). This
+ * endpoint may initiate an asynchronous job. To obtain the final result of the
+ * job, the client should periodically poll members/remove/job_status/get.
+ * @function DropboxTeam#teamMembersRemove
+ * @arg {TeamMembersRemoveArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMembersRemove = function (arg) {
+ return this.request('team/members/remove', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Once an async_job_id is returned from members/remove , use this to poll the
+ * status of the asynchronous request. Permission : Team member management.
+ * @function DropboxTeam#teamMembersRemoveJobStatusGet
+ * @arg {AsyncPollArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMembersRemoveJobStatusGet = function (arg) {
+ return this.request('team/members/remove/job_status/get', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Sends welcome email to pending team member. Permission : Team member
+ * management Exactly one of team_member_id, email, or external_id must be
+ * provided to identify the user account. No-op if team member is not pending.
+ * @function DropboxTeam#teamMembersSendWelcomeEmail
+ * @arg {TeamUserSelectorArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMembersSendWelcomeEmail = function (arg) {
+ return this.request('team/members/send_welcome_email', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Updates a team member's permissions. Permission : Team member management.
+ * @function DropboxTeam#teamMembersSetAdminPermissions
+ * @arg {TeamMembersSetPermissionsArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMembersSetAdminPermissions = function (arg) {
+ return this.request('team/members/set_admin_permissions', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Updates a team member's profile. Permission : Team member management.
+ * @function DropboxTeam#teamMembersSetProfile
+ * @arg {TeamMembersSetProfileArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMembersSetProfile = function (arg) {
+ return this.request('team/members/set_profile', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Suspend a member from a team. Permission : Team member management Exactly one
+ * of team_member_id, email, or external_id must be provided to identify the
+ * user account.
+ * @function DropboxTeam#teamMembersSuspend
+ * @arg {TeamMembersDeactivateArg} arg - The request parameters.
+ * @returns {Promise}
+ */
+routes$1.teamMembersSuspend = function (arg) {
+ return this.request('team/members/suspend', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Unsuspend a member from a team. Permission : Team member management Exactly
+ * one of team_member_id, email, or external_id must be provided to identify the
+ * user account.
+ * @function DropboxTeam#teamMembersUnsuspend
+ * @arg {TeamMembersUnsuspendArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamMembersUnsuspend = function (arg) {
+ return this.request('team/members/unsuspend', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Returns a list of all team-accessible namespaces. This list includes team
+ * folders, shared folders containing team members, team members' home
+ * namespaces, and team members' app folders. Home namespaces and app folders
+ * are always owned by this team or members of the team, but shared folders may
+ * be owned by other users or other teams. Duplicates may occur in the list.
+ * @function DropboxTeam#teamNamespacesList
+ * @arg {TeamTeamNamespacesListArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamNamespacesList = function (arg) {
+ return this.request('team/namespaces/list', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Once a cursor has been retrieved from namespaces/list, use this to paginate
+ * through all team-accessible namespaces. Duplicates may occur in the list.
+ * @function DropboxTeam#teamNamespacesListContinue
+ * @arg {TeamTeamNamespacesListContinueArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamNamespacesListContinue = function (arg) {
+ return this.request('team/namespaces/list/continue', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * @function DropboxTeam#teamPropertiesTemplateAdd
+ * @deprecated
+ * @arg {FilePropertiesAddTemplateArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamPropertiesTemplateAdd = function (arg) {
+ return this.request('team/properties/template/add', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * @function DropboxTeam#teamPropertiesTemplateGet
+ * @deprecated
+ * @arg {FilePropertiesGetTemplateArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamPropertiesTemplateGet = function (arg) {
+ return this.request('team/properties/template/get', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * @function DropboxTeam#teamPropertiesTemplateList
+ * @deprecated
+ * @arg {void} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamPropertiesTemplateList = function (arg) {
+ return this.request('team/properties/template/list', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * @function DropboxTeam#teamPropertiesTemplateUpdate
+ * @deprecated
+ * @arg {FilePropertiesUpdateTemplateArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamPropertiesTemplateUpdate = function (arg) {
+ return this.request('team/properties/template/update', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Retrieves reporting data about a team's user activity.
+ * @function DropboxTeam#teamReportsGetActivity
+ * @arg {TeamDateRange} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamReportsGetActivity = function (arg) {
+ return this.request('team/reports/get_activity', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Retrieves reporting data about a team's linked devices.
+ * @function DropboxTeam#teamReportsGetDevices
+ * @arg {TeamDateRange} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamReportsGetDevices = function (arg) {
+ return this.request('team/reports/get_devices', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Retrieves reporting data about a team's membership.
+ * @function DropboxTeam#teamReportsGetMembership
+ * @arg {TeamDateRange} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamReportsGetMembership = function (arg) {
+ return this.request('team/reports/get_membership', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Retrieves reporting data about a team's storage usage.
+ * @function DropboxTeam#teamReportsGetStorage
+ * @arg {TeamDateRange} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamReportsGetStorage = function (arg) {
+ return this.request('team/reports/get_storage', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Sets an archived team folder's status to active. Permission : Team member
+ * file access.
+ * @function DropboxTeam#teamTeamFolderActivate
+ * @arg {TeamTeamFolderIdArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamTeamFolderActivate = function (arg) {
+ return this.request('team/team_folder/activate', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Sets an active team folder's status to archived and removes all folder and
+ * file members. Permission : Team member file access.
+ * @function DropboxTeam#teamTeamFolderArchive
+ * @arg {TeamTeamFolderArchiveArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamTeamFolderArchive = function (arg) {
+ return this.request('team/team_folder/archive', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Returns the status of an asynchronous job for archiving a team folder.
+ * Permission : Team member file access.
+ * @function DropboxTeam#teamTeamFolderArchiveCheck
+ * @arg {AsyncPollArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamTeamFolderArchiveCheck = function (arg) {
+ return this.request('team/team_folder/archive/check', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Creates a new, active, team folder with no members. Permission : Team member
+ * file access.
+ * @function DropboxTeam#teamTeamFolderCreate
+ * @arg {TeamTeamFolderCreateArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamTeamFolderCreate = function (arg) {
+ return this.request('team/team_folder/create', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Retrieves metadata for team folders. Permission : Team member file access.
+ * @function DropboxTeam#teamTeamFolderGetInfo
+ * @arg {TeamTeamFolderIdListArg} arg - The request parameters.
+ * @returns {Promise., Error.>}
+ */
+routes$1.teamTeamFolderGetInfo = function (arg) {
+ return this.request('team/team_folder/get_info', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Lists all team folders. Permission : Team member file access.
+ * @function DropboxTeam#teamTeamFolderList
+ * @arg {TeamTeamFolderListArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamTeamFolderList = function (arg) {
+ return this.request('team/team_folder/list', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Once a cursor has been retrieved from team_folder/list, use this to paginate
+ * through all team folders. Permission : Team member file access.
+ * @function DropboxTeam#teamTeamFolderListContinue
+ * @arg {TeamTeamFolderListContinueArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamTeamFolderListContinue = function (arg) {
+ return this.request('team/team_folder/list/continue', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Permanently deletes an archived team folder. Permission : Team member file
+ * access.
+ * @function DropboxTeam#teamTeamFolderPermanentlyDelete
+ * @arg {TeamTeamFolderIdArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamTeamFolderPermanentlyDelete = function (arg) {
+ return this.request('team/team_folder/permanently_delete', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Changes an active team folder's name. Permission : Team member file access.
+ * @function DropboxTeam#teamTeamFolderRename
+ * @arg {TeamTeamFolderRenameArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamTeamFolderRename = function (arg) {
+ return this.request('team/team_folder/rename', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Updates the sync settings on a team folder or its contents. Use of this
+ * endpoint requires that the team has team selective sync enabled.
+ * @function DropboxTeam#teamTeamFolderUpdateSyncSettings
+ * @arg {TeamTeamFolderUpdateSyncSettingsArg} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamTeamFolderUpdateSyncSettings = function (arg) {
+ return this.request('team/team_folder/update_sync_settings', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * Returns the member profile of the admin who generated the team access token
+ * used to make the call.
+ * @function DropboxTeam#teamTokenGetAuthenticatedAdmin
+ * @arg {void} arg - The request parameters.
+ * @returns {Promise.>}
+ */
+routes$1.teamTokenGetAuthenticatedAdmin = function (arg) {
+ return this.request('team/token/get_authenticated_admin', arg, 'team', 'api', 'rpc');
+};
+
+/**
+ * @class DropboxTeam
+ * @extends DropboxBase
+ * @classdesc The Dropbox SDK class that provides access to team endpoints.
+ * @arg {Object} options
+ * @arg {String} [options.accessToken] - An access token for making authenticated
+ * requests.
+ * @arg {String} [options.clientId] - The client id for your app. Used to create
+ * authentication URL.
+ */
+var DropboxTeam = function (_DropboxBase) {
+ inherits(DropboxTeam, _DropboxBase);
+
+ function DropboxTeam(options) {
+ classCallCheck(this, DropboxTeam);
+
+ var _this = possibleConstructorReturn(this, (DropboxTeam.__proto__ || Object.getPrototypeOf(DropboxTeam)).call(this, options));
+
+ Object.assign(_this, routes$1);
+ return _this;
+ }
+
+ /**
+ * Returns an instance of Dropbox that can make calls to user api endpoints on
+ * behalf of the passed user id, using the team access token.
+ * @arg {String} userId - The user id to use the Dropbox class as
+ * @returns {Dropbox} An instance of Dropbox used to make calls to user api
+ * endpoints
+ */
+
+
+ createClass(DropboxTeam, [{
+ key: 'actAsUser',
+ value: function actAsUser(userId) {
+ return new Dropbox({
+ accessToken: this.accessToken,
+ clientId: this.clientId,
+ selectUser: userId
+ });
+ }
+ }]);
+ return DropboxTeam;
+}(DropboxBase);
+
+
+
+var dropboxTeam = Object.freeze({
+ DropboxTeam: DropboxTeam
+});
+
+var src = {
+ Dropbox: dropbox.Dropbox,
+ DropboxTeam: dropboxTeam.DropboxTeam
+};
+
+return src;
+
+})));
+//# sourceMappingURL=Dropbox-sdk.js.map
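
For orientation only, here is a minimal sketch of how extension code could drive this bundled SDK after obtaining a token through chrome.identity. It is illustrative, not part of the vendored file or of this patch: 'APP_KEY', the '/styles.zip' path, and someZipBlob are placeholder assumptions.

// Hypothetical usage sketch (assumes the "identity" permission is granted).
const client = new Dropbox({clientId: 'APP_KEY'}); // placeholder app key
const authUrl = client.getAuthenticationUrl(chrome.identity.getRedirectURL());
chrome.identity.launchWebAuthFlow({url: authUrl, interactive: true}, responseUrl => {
  // With the default implicit grant, the access token comes back in the URL fragment.
  const token = new URLSearchParams(responseUrl.split('#')[1]).get('access_token');
  client.setAccessToken(token);
  // Upload an already-built zip blob, overwriting any previous export.
  client.filesUpload({path: '/styles.zip', contents: someZipBlob, mode: {'.tag': 'overwrite'}})
    .then(() => console.info('export uploaded'));
});

The `{'.tag': 'overwrite'}` form is the tagged-union shape the SDK serializes for write modes; a plain string may also be accepted, but the tagged form is the safer assumption.
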
diff --git a/sync/vendor/zipjs/LICENSE b/sync/vendor/zipjs/LICENSE
new file mode 100755
index 00000000..2c3ff4a1
--- /dev/null
+++ b/sync/vendor/zipjs/LICENSE
@@ -0,0 +1,22 @@
+/*
+ Copyright (c) 2013 Gildas Lormeau. All rights reserved.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the distribution.
+ 3. The names of the authors may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+ THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+ INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+ OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/sync/vendor/zipjs/deflate.js b/sync/vendor/zipjs/deflate.js
new file mode 100755
index 00000000..8ac3c0af
--- /dev/null
+++ b/sync/vendor/zipjs/deflate.js
@@ -0,0 +1,2060 @@
+/*
+ Copyright (c) 2013 Gildas Lormeau. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the distribution.
+
+ 3. The names of the authors may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+ INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+ OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This program is based on JZlib 1.0.2 by ymnk, JCraft, Inc.
+ * JZlib is based on zlib-1.1.3, so all credit should go to the authors
+ * Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
+ * and contributors of zlib.
+ */
+
+(function(global) {
+ "use strict";
+
+ // Global
+
+ var MAX_BITS = 15;
+ var D_CODES = 30;
+ var BL_CODES = 19;
+
+ var LENGTH_CODES = 29;
+ var LITERALS = 256;
+ var L_CODES = (LITERALS + 1 + LENGTH_CODES);
+ var HEAP_SIZE = (2 * L_CODES + 1);
+
+ var END_BLOCK = 256;
+
+ // Bit length codes must not exceed MAX_BL_BITS bits
+ var MAX_BL_BITS = 7;
+
+ // repeat previous bit length 3-6 times (2 bits of repeat count)
+ var REP_3_6 = 16;
+
+ // repeat a zero length 3-10 times (3 bits of repeat count)
+ var REPZ_3_10 = 17;
+
+ // repeat a zero length 11-138 times (7 bits of repeat count)
+ var REPZ_11_138 = 18;
+
+ // The lengths of the bit length codes are sent in order of decreasing
+ // probability, to avoid transmitting the lengths for unused bit
+ // length codes.
+
+ var Buf_size = 8 * 2;
+
+ // JZlib version : "1.0.2"
+ var Z_DEFAULT_COMPRESSION = -1;
+
+ // compression strategy
+ var Z_FILTERED = 1;
+ var Z_HUFFMAN_ONLY = 2;
+ var Z_DEFAULT_STRATEGY = 0;
+
+ var Z_NO_FLUSH = 0;
+ var Z_PARTIAL_FLUSH = 1;
+ var Z_FULL_FLUSH = 3;
+ var Z_FINISH = 4;
+
+ var Z_OK = 0;
+ var Z_STREAM_END = 1;
+ var Z_NEED_DICT = 2;
+ var Z_STREAM_ERROR = -2;
+ var Z_DATA_ERROR = -3;
+ var Z_BUF_ERROR = -5;
+
+ // Tree
+
+ // see definition of array dist_code below
+ var _dist_code = [ 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 0, 0, 16, 17, 18, 18, 19, 19,
+ 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 ];
+
+ function Tree() {
+ var that = this;
+
+ // dyn_tree; // the dynamic tree
+ // max_code; // largest code with non zero frequency
+ // stat_desc; // the corresponding static tree
+
+ // Compute the optimal bit lengths for a tree and update the total bit
+ // length
+ // for the current block.
+ // IN assertion: the fields freq and dad are set, heap[heap_max] and
+ // above are the tree nodes sorted by increasing frequency.
+ // OUT assertions: the field len is set to the optimal bit length, the
+ // array bl_count contains the frequencies for each bit length.
+ // The length opt_len is updated; static_len is also updated if stree is
+ // not null.
+ function gen_bitlen(s) {
+ var tree = that.dyn_tree;
+ var stree = that.stat_desc.static_tree;
+ var extra = that.stat_desc.extra_bits;
+ var base = that.stat_desc.extra_base;
+ var max_length = that.stat_desc.max_length;
+ var h; // heap index
+ var n, m; // iterate over the tree elements
+ var bits; // bit length
+ var xbits; // extra bits
+ var f; // frequency
+ var overflow = 0; // number of elements with bit length too large
+
+ for (bits = 0; bits <= MAX_BITS; bits++)
+ s.bl_count[bits] = 0;
+
+ // In a first pass, compute the optimal bit lengths (which may
+ // overflow in the case of the bit length tree).
+ tree[s.heap[s.heap_max] * 2 + 1] = 0; // root of the heap
+
+ for (h = s.heap_max + 1; h < HEAP_SIZE; h++) {
+ n = s.heap[h];
+ bits = tree[tree[n * 2 + 1] * 2 + 1] + 1;
+ if (bits > max_length) {
+ bits = max_length;
+ overflow++;
+ }
+ tree[n * 2 + 1] = bits;
+ // We overwrite tree[n*2+1] which is no longer needed
+
+ if (n > that.max_code)
+ continue; // not a leaf node
+
+ s.bl_count[bits]++;
+ xbits = 0;
+ if (n >= base)
+ xbits = extra[n - base];
+ f = tree[n * 2];
+ s.opt_len += f * (bits + xbits);
+ if (stree)
+ s.static_len += f * (stree[n * 2 + 1] + xbits);
+ }
+ if (overflow === 0)
+ return;
+
+ // This happens for example on obj2 and pic of the Calgary corpus
+ // Find the first bit length which could increase:
+ do {
+ bits = max_length - 1;
+ while (s.bl_count[bits] === 0)
+ bits--;
+ s.bl_count[bits]--; // move one leaf down the tree
+ s.bl_count[bits + 1] += 2; // move one overflow item as its brother
+ s.bl_count[max_length]--;
+ // The brother of the overflow item also moves one step up,
+ // but this does not affect bl_count[max_length]
+ overflow -= 2;
+ } while (overflow > 0);
+
+ for (bits = max_length; bits !== 0; bits--) {
+ n = s.bl_count[bits];
+ while (n !== 0) {
+ m = s.heap[--h];
+ if (m > that.max_code)
+ continue;
+ if (tree[m * 2 + 1] != bits) {
+ s.opt_len += (bits - tree[m * 2 + 1]) * tree[m * 2];
+ tree[m * 2 + 1] = bits;
+ }
+ n--;
+ }
+ }
+ }
+
+ // Reverse the first len bits of a code, using straightforward code (a
+ // faster
+ // method would use a table)
+ // IN assertion: 1 <= len <= 15
+ function bi_reverse(code, // the value to invert
+ len // its bit length
+ ) {
+ var res = 0;
+ do {
+ res |= code & 1;
+ code >>>= 1;
+ res <<= 1;
+ } while (--len > 0);
+ return res >>> 1;
+ }
+
+ // Generate the codes for a given tree and bit counts (which need not be
+ // optimal).
+ // IN assertion: the array bl_count contains the bit length statistics for
+ // the given tree and the field len is set for all tree elements.
+ // OUT assertion: the field code is set for all tree elements of non
+ // zero code length.
+ function gen_codes(tree, // the tree to decorate
+ max_code, // largest code with non zero frequency
+ bl_count // number of codes at each bit length
+ ) {
+ var next_code = []; // next code value for each
+ // bit length
+ var code = 0; // running code value
+ var bits; // bit index
+ var n; // code index
+ var len;
+
+ // The distribution counts are first used to generate the code values
+ // without bit reversal.
+ for (bits = 1; bits <= MAX_BITS; bits++) {
+ next_code[bits] = code = ((code + bl_count[bits - 1]) << 1);
+ }
+
+ // Check that the bit counts in bl_count are consistent. The last code
+ // must be all ones.
+ // Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1, "inconsistent bit counts");
+
+ for (n = 0; n <= max_code; n++) {
+ len = tree[n * 2 + 1];
+ if (len === 0)
+ continue;
+ // Now reverse the bits
+ tree[n * 2] = bi_reverse(next_code[len]++, len);
+ }
+ }
+
+ // Construct one Huffman tree and assign the code bit strings and lengths.
+ // Update the total bit length for the current block.
+ // IN assertion: the field freq is set for all tree elements.
+ // OUT assertions: the fields len and code are set to the optimal bit length
+ // and corresponding code. The length opt_len is updated; static_len is also
+ // updated if stree is not null. The field max_code is set.
+ that.build_tree = function(s) {
+ var tree = that.dyn_tree;
+ var stree = that.stat_desc.static_tree;
+ var elems = that.stat_desc.elems;
+ var n, m; // iterate over heap elements
+ var max_code = -1; // largest code with non zero frequency
+ var node; // new node being created
+
+ // Construct the initial heap, with least frequent element in
+ // heap[1]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
+ // heap[0] is not used.
+ s.heap_len = 0;
+ s.heap_max = HEAP_SIZE;
+
+ for (n = 0; n < elems; n++) {
+ if (tree[n * 2] !== 0) {
+ s.heap[++s.heap_len] = max_code = n;
+ s.depth[n] = 0;
+ } else {
+ tree[n * 2 + 1] = 0;
+ }
+ }
+
+ // The pkzip format requires that at least one distance code exists,
+ // and that at least one bit should be sent even if there is only one
+ // possible code. So to avoid special checks later on we force at least
+ // two codes of non zero frequency.
+ while (s.heap_len < 2) {
+ node = s.heap[++s.heap_len] = max_code < 2 ? ++max_code : 0;
+ tree[node * 2] = 1;
+ s.depth[node] = 0;
+ s.opt_len--;
+ if (stree)
+ s.static_len -= stree[node * 2 + 1];
+ // node is 0 or 1 so it does not have extra bits
+ }
+ that.max_code = max_code;
+
+ // The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
+ // establish sub-heaps of increasing lengths:
+ for (n = Math.floor(s.heap_len / 2); n >= 1; n--)
+ s.pqdownheap(tree, n);
+
+ // Construct the Huffman tree by repeatedly combining the least two
+ // frequent nodes.
+
+ node = elems; // next internal node of the tree
+ do {
+ // n = node of least frequency
+ n = s.heap[1];
+ s.heap[1] = s.heap[s.heap_len--];
+ s.pqdownheap(tree, 1);
+ m = s.heap[1]; // m = node of next least frequency
+
+ s.heap[--s.heap_max] = n; // keep the nodes sorted by frequency
+ s.heap[--s.heap_max] = m;
+
+ // Create a new node father of n and m
+ tree[node * 2] = (tree[n * 2] + tree[m * 2]);
+ s.depth[node] = Math.max(s.depth[n], s.depth[m]) + 1;
+ tree[n * 2 + 1] = tree[m * 2 + 1] = node;
+
+ // and insert the new node in the heap
+ s.heap[1] = node++;
+ s.pqdownheap(tree, 1);
+ } while (s.heap_len >= 2);
+
+ s.heap[--s.heap_max] = s.heap[1];
+
+ // At this point, the fields freq and dad are set. We can now
+ // generate the bit lengths.
+
+ gen_bitlen(s);
+
+ // The field len is now set, we can generate the bit codes
+ gen_codes(tree, that.max_code, s.bl_count);
+ };
+
+ }
+
+ Tree._length_code = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
+ 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28 ];
+
+ Tree.base_length = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 0 ];
+
+ Tree.base_dist = [ 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384,
+ 24576 ];
+
+ // Mapping from a distance to a distance code. dist is the distance - 1 and
+ // must not have side effects. _dist_code[256] and _dist_code[257] are never
+ // used.
+ Tree.d_code = function(dist) {
+ return ((dist) < 256 ? _dist_code[dist] : _dist_code[256 + ((dist) >>> 7)]);
+ };
+
+ // extra bits for each length code
+ Tree.extra_lbits = [ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 ];
+
+ // extra bits for each distance code
+ Tree.extra_dbits = [ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 ];
+
+ // extra bits for each bit length code
+ Tree.extra_blbits = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7 ];
+
+ Tree.bl_order = [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ];
+
+ // StaticTree
+
+ function StaticTree(static_tree, extra_bits, extra_base, elems, max_length) {
+ var that = this;
+ that.static_tree = static_tree;
+ that.extra_bits = extra_bits;
+ that.extra_base = extra_base;
+ that.elems = elems;
+ that.max_length = max_length;
+ }
+
+ StaticTree.static_ltree = [ 12, 8, 140, 8, 76, 8, 204, 8, 44, 8, 172, 8, 108, 8, 236, 8, 28, 8, 156, 8, 92, 8, 220, 8, 60, 8, 188, 8, 124, 8, 252, 8, 2, 8,
+ 130, 8, 66, 8, 194, 8, 34, 8, 162, 8, 98, 8, 226, 8, 18, 8, 146, 8, 82, 8, 210, 8, 50, 8, 178, 8, 114, 8, 242, 8, 10, 8, 138, 8, 74, 8, 202, 8, 42,
+ 8, 170, 8, 106, 8, 234, 8, 26, 8, 154, 8, 90, 8, 218, 8, 58, 8, 186, 8, 122, 8, 250, 8, 6, 8, 134, 8, 70, 8, 198, 8, 38, 8, 166, 8, 102, 8, 230, 8,
+ 22, 8, 150, 8, 86, 8, 214, 8, 54, 8, 182, 8, 118, 8, 246, 8, 14, 8, 142, 8, 78, 8, 206, 8, 46, 8, 174, 8, 110, 8, 238, 8, 30, 8, 158, 8, 94, 8,
+ 222, 8, 62, 8, 190, 8, 126, 8, 254, 8, 1, 8, 129, 8, 65, 8, 193, 8, 33, 8, 161, 8, 97, 8, 225, 8, 17, 8, 145, 8, 81, 8, 209, 8, 49, 8, 177, 8, 113,
+ 8, 241, 8, 9, 8, 137, 8, 73, 8, 201, 8, 41, 8, 169, 8, 105, 8, 233, 8, 25, 8, 153, 8, 89, 8, 217, 8, 57, 8, 185, 8, 121, 8, 249, 8, 5, 8, 133, 8,
+ 69, 8, 197, 8, 37, 8, 165, 8, 101, 8, 229, 8, 21, 8, 149, 8, 85, 8, 213, 8, 53, 8, 181, 8, 117, 8, 245, 8, 13, 8, 141, 8, 77, 8, 205, 8, 45, 8,
+ 173, 8, 109, 8, 237, 8, 29, 8, 157, 8, 93, 8, 221, 8, 61, 8, 189, 8, 125, 8, 253, 8, 19, 9, 275, 9, 147, 9, 403, 9, 83, 9, 339, 9, 211, 9, 467, 9,
+ 51, 9, 307, 9, 179, 9, 435, 9, 115, 9, 371, 9, 243, 9, 499, 9, 11, 9, 267, 9, 139, 9, 395, 9, 75, 9, 331, 9, 203, 9, 459, 9, 43, 9, 299, 9, 171, 9,
+ 427, 9, 107, 9, 363, 9, 235, 9, 491, 9, 27, 9, 283, 9, 155, 9, 411, 9, 91, 9, 347, 9, 219, 9, 475, 9, 59, 9, 315, 9, 187, 9, 443, 9, 123, 9, 379,
+ 9, 251, 9, 507, 9, 7, 9, 263, 9, 135, 9, 391, 9, 71, 9, 327, 9, 199, 9, 455, 9, 39, 9, 295, 9, 167, 9, 423, 9, 103, 9, 359, 9, 231, 9, 487, 9, 23,
+ 9, 279, 9, 151, 9, 407, 9, 87, 9, 343, 9, 215, 9, 471, 9, 55, 9, 311, 9, 183, 9, 439, 9, 119, 9, 375, 9, 247, 9, 503, 9, 15, 9, 271, 9, 143, 9,
+ 399, 9, 79, 9, 335, 9, 207, 9, 463, 9, 47, 9, 303, 9, 175, 9, 431, 9, 111, 9, 367, 9, 239, 9, 495, 9, 31, 9, 287, 9, 159, 9, 415, 9, 95, 9, 351, 9,
+ 223, 9, 479, 9, 63, 9, 319, 9, 191, 9, 447, 9, 127, 9, 383, 9, 255, 9, 511, 9, 0, 7, 64, 7, 32, 7, 96, 7, 16, 7, 80, 7, 48, 7, 112, 7, 8, 7, 72, 7,
+ 40, 7, 104, 7, 24, 7, 88, 7, 56, 7, 120, 7, 4, 7, 68, 7, 36, 7, 100, 7, 20, 7, 84, 7, 52, 7, 116, 7, 3, 8, 131, 8, 67, 8, 195, 8, 35, 8, 163, 8,
+ 99, 8, 227, 8 ];
+
+ StaticTree.static_dtree = [ 0, 5, 16, 5, 8, 5, 24, 5, 4, 5, 20, 5, 12, 5, 28, 5, 2, 5, 18, 5, 10, 5, 26, 5, 6, 5, 22, 5, 14, 5, 30, 5, 1, 5, 17, 5, 9, 5,
+ 25, 5, 5, 5, 21, 5, 13, 5, 29, 5, 3, 5, 19, 5, 11, 5, 27, 5, 7, 5, 23, 5 ];
+
+ StaticTree.static_l_desc = new StaticTree(StaticTree.static_ltree, Tree.extra_lbits, LITERALS + 1, L_CODES, MAX_BITS);
+
+ StaticTree.static_d_desc = new StaticTree(StaticTree.static_dtree, Tree.extra_dbits, 0, D_CODES, MAX_BITS);
+
+ StaticTree.static_bl_desc = new StaticTree(null, Tree.extra_blbits, 0, BL_CODES, MAX_BL_BITS);
+
+ // Deflate
+
+ var MAX_MEM_LEVEL = 9;
+ var DEF_MEM_LEVEL = 8;
+
+ function Config(good_length, max_lazy, nice_length, max_chain, func) {
+ var that = this;
+ that.good_length = good_length;
+ that.max_lazy = max_lazy;
+ that.nice_length = nice_length;
+ that.max_chain = max_chain;
+ that.func = func;
+ }
+
+ var STORED = 0;
+ var FAST = 1;
+ var SLOW = 2;
+ var config_table = [ new Config(0, 0, 0, 0, STORED), new Config(4, 4, 8, 4, FAST), new Config(4, 5, 16, 8, FAST), new Config(4, 6, 32, 32, FAST),
+ new Config(4, 4, 16, 16, SLOW), new Config(8, 16, 32, 32, SLOW), new Config(8, 16, 128, 128, SLOW), new Config(8, 32, 128, 256, SLOW),
+ new Config(32, 128, 258, 1024, SLOW), new Config(32, 258, 258, 4096, SLOW) ];
+
+ var z_errmsg = [ "need dictionary", // Z_NEED_DICT
+ // 2
+ "stream end", // Z_STREAM_END 1
+ "", // Z_OK 0
+ "", // Z_ERRNO (-1)
+ "stream error", // Z_STREAM_ERROR (-2)
+ "data error", // Z_DATA_ERROR (-3)
+ "", // Z_MEM_ERROR (-4)
+ "buffer error", // Z_BUF_ERROR (-5)
+ "",// Z_VERSION_ERROR (-6)
+ "" ];
+
+ // block not completed, need more input or more output
+ var NeedMore = 0;
+
+ // block flush performed
+ var BlockDone = 1;
+
+ // finish started, need only more output at next deflate
+ var FinishStarted = 2;
+
+ // finish done, accept no more input or output
+ var FinishDone = 3;
+
+ // preset dictionary flag in zlib header
+ var PRESET_DICT = 0x20;
+
+ var INIT_STATE = 42;
+ var BUSY_STATE = 113;
+ var FINISH_STATE = 666;
+
+ // The deflate compression method
+ var Z_DEFLATED = 8;
+
+ var STORED_BLOCK = 0;
+ var STATIC_TREES = 1;
+ var DYN_TREES = 2;
+
+ var MIN_MATCH = 3;
+ var MAX_MATCH = 258;
+ var MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);
+
+ function smaller(tree, n, m, depth) {
+ var tn2 = tree[n * 2];
+ var tm2 = tree[m * 2];
+ return (tn2 < tm2 || (tn2 == tm2 && depth[n] <= depth[m]));
+ }
+
+ function Deflate() {
+
+ var that = this;
+ var strm; // pointer back to this zlib stream
+ var status; // as the name implies
+ // pending_buf; // output still pending
+ var pending_buf_size; // size of pending_buf
+ // pending_out; // next pending byte to output to the stream
+ // pending; // nb of bytes in the pending buffer
+ var method; // STORED (for zip only) or DEFLATED
+ var last_flush; // value of flush param for previous deflate call
+
+ var w_size; // LZ77 window size (32K by default)
+ var w_bits; // log2(w_size) (8..16)
+ var w_mask; // w_size - 1
+
+ var window;
+ // Sliding window. Input bytes are read into the second half of the window,
+ // and move to the first half later to keep a dictionary of at least wSize
+ // bytes. With this organization, matches are limited to a distance of
+ // wSize-MAX_MATCH bytes, but this ensures that IO is always
+ // performed with a length multiple of the block size. Also, it limits
+ // the window size to 64K, which is quite useful on MSDOS.
+ // To do: use the user input buffer as sliding window.
+
+ var window_size;
+ // Actual size of window: 2*wSize, except when the user input buffer
+ // is directly used as sliding window.
+
+ var prev;
+ // Link to older string with same hash index. To limit the size of this
+ // array to 64K, this link is maintained only for the last 32K strings.
+ // An index in this array is thus a window index modulo 32K.
+
+ var head; // Heads of the hash chains or NIL.
+
+ var ins_h; // hash index of string to be inserted
+ var hash_size; // number of elements in hash table
+ var hash_bits; // log2(hash_size)
+ var hash_mask; // hash_size-1
+
+ // Number of bits by which ins_h must be shifted at each input
+ // step. It must be such that after MIN_MATCH steps, the oldest
+ // byte no longer takes part in the hash key, that is:
+ // hash_shift * MIN_MATCH >= hash_bits
+ var hash_shift;
+
+ // Window position at the beginning of the current output block. Gets
+ // negative when the window is moved backwards.
+
+ var block_start;
+
+ var match_length; // length of best match
+ var prev_match; // previous match
+ var match_available; // set if previous match exists
+ var strstart; // start of string to insert
+ var match_start; // start of matching string
+ var lookahead; // number of valid bytes ahead in window
+
+ // Length of the best match at previous step. Matches not greater than this
+ // are discarded. This is used in the lazy match evaluation.
+ var prev_length;
+
+ // To speed up deflation, hash chains are never searched beyond this
+ // length. A higher limit improves compression ratio but degrades the speed.
+ var max_chain_length;
+
+ // Attempt to find a better match only when the current match is strictly
+ // smaller than this value. This mechanism is used only for compression
+ // levels >= 4.
+ var max_lazy_match;
+
+ // Insert new strings in the hash table only if the match length is not
+ // greater than this length. This saves time but degrades compression.
+ // max_insert_length is used only for compression levels <= 3.
+
+ var level; // compression level (1..9)
+ var strategy; // favor or force Huffman coding
+
+ // Use a faster search when the previous match is longer than this
+ var good_match;
+
+ // Stop searching when current match exceeds this
+ var nice_match;
+
+ var dyn_ltree; // literal and length tree
+ var dyn_dtree; // distance tree
+ var bl_tree; // Huffman tree for bit lengths
+
+ var l_desc = new Tree(); // desc for literal tree
+ var d_desc = new Tree(); // desc for distance tree
+ var bl_desc = new Tree(); // desc for bit length tree
+
+ // that.heap_len; // number of elements in the heap
+ // that.heap_max; // element of largest frequency
+ // The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
+ // The same heap array is used to build all trees.
+
+ // Depth of each subtree used as tie breaker for trees of equal frequency
+ that.depth = [];
+
+ var l_buf; // index for literals or lengths */
+
+ // Size of match buffer for literals/lengths. There are 4 reasons for
+ // limiting lit_bufsize to 64K:
+ // - frequencies can be kept in 16 bit counters
+ // - if compression is not successful for the first block, all input
+ // data is still in the window so we can still emit a stored block even
+ // when input comes from standard input. (This can also be done for
+ // all blocks if lit_bufsize is not greater than 32K.)
+ // - if compression is not successful for a file smaller than 64K, we can
+ // even emit a stored file instead of a stored block (saving 5 bytes).
+ // This is applicable only for zip (not gzip or zlib).
+ // - creating new Huffman trees less frequently may not provide fast
+ // adaptation to changes in the input data statistics. (Take for
+ // example a binary file with poorly compressible code followed by
+ // a highly compressible string table.) Smaller buffer sizes give
+ // fast adaptation but have of course the overhead of transmitting
+ // trees more frequently.
+ // - I can't count above 4
+ var lit_bufsize;
+
+ var last_lit; // running index in l_buf
+
+ // Buffer for distances. To simplify the code, d_buf and l_buf have
+ // the same number of elements. To use different lengths, an extra flag
+ // array would be necessary.
+
+ var d_buf; // index into pending_buf
+
+ // that.opt_len; // bit length of current block with optimal trees
+ // that.static_len; // bit length of current block with static trees
+ var matches; // number of string matches in current block
+ var last_eob_len; // bit length of EOB code for last block
+
+ // Output buffer. bits are inserted starting at the bottom (least
+ // significant bits).
+ var bi_buf;
+
+ // Number of valid bits in bi_buf. All bits above the last valid bit
+ // are always zero.
+ var bi_valid;
+
+ // number of codes at each bit length for an optimal tree
+ that.bl_count = [];
+
+ // heap used to build the Huffman trees
+ that.heap = [];
+
+ dyn_ltree = [];
+ dyn_dtree = [];
+ bl_tree = [];
+
+ function lm_init() {
+ var i;
+ window_size = 2 * w_size;
+
+ head[hash_size - 1] = 0;
+ for (i = 0; i < hash_size - 1; i++) {
+ head[i] = 0;
+ }
+
+ // Set the default configuration parameters:
+ max_lazy_match = config_table[level].max_lazy;
+ good_match = config_table[level].good_length;
+ nice_match = config_table[level].nice_length;
+ max_chain_length = config_table[level].max_chain;
+
+ strstart = 0;
+ block_start = 0;
+ lookahead = 0;
+ match_length = prev_length = MIN_MATCH - 1;
+ match_available = 0;
+ ins_h = 0;
+ }
+
+ function init_block() {
+ var i;
+ // Initialize the trees.
+ for (i = 0; i < L_CODES; i++)
+ dyn_ltree[i * 2] = 0;
+ for (i = 0; i < D_CODES; i++)
+ dyn_dtree[i * 2] = 0;
+ for (i = 0; i < BL_CODES; i++)
+ bl_tree[i * 2] = 0;
+
+ dyn_ltree[END_BLOCK * 2] = 1;
+ that.opt_len = that.static_len = 0;
+ last_lit = matches = 0;
+ }
+
+ // Initialize the tree data structures for a new zlib stream.
+ function tr_init() {
+
+ l_desc.dyn_tree = dyn_ltree;
+ l_desc.stat_desc = StaticTree.static_l_desc;
+
+ d_desc.dyn_tree = dyn_dtree;
+ d_desc.stat_desc = StaticTree.static_d_desc;
+
+ bl_desc.dyn_tree = bl_tree;
+ bl_desc.stat_desc = StaticTree.static_bl_desc;
+
+ bi_buf = 0;
+ bi_valid = 0;
+ last_eob_len = 8; // enough lookahead for inflate
+
+ // Initialize the first block of the first file:
+ init_block();
+ }
+
+ // Restore the heap property by moving down the tree starting at node k,
+ // exchanging a node with the smallest of its two sons if necessary,
+ // stopping
+ // when the heap property is re-established (each father smaller than its
+ // two sons).
+ that.pqdownheap = function(tree, // the tree to restore
+ k // node to move down
+ ) {
+ var heap = that.heap;
+ var v = heap[k];
+ var j = k << 1; // left son of k
+ while (j <= that.heap_len) {
+ // Set j to the smallest of the two sons:
+ if (j < that.heap_len && smaller(tree, heap[j + 1], heap[j], that.depth)) {
+ j++;
+ }
+ // Exit if v is smaller than both sons
+ if (smaller(tree, v, heap[j], that.depth))
+ break;
+
+ // Exchange v with the smallest son
+ heap[k] = heap[j];
+ k = j;
+ // And continue down the tree, setting j to the left son of k
+ j <<= 1;
+ }
+ heap[k] = v;
+ };
+
+ // Scan a literal or distance tree to determine the frequencies of the codes
+ // in the bit length tree.
+ function scan_tree(tree,// the tree to be scanned
+ max_code // and its largest code of non zero frequency
+ ) {
+ var n; // iterates over all tree elements
+ var prevlen = -1; // last emitted length
+ var curlen; // length of current code
+ var nextlen = tree[0 * 2 + 1]; // length of next code
+ var count = 0; // repeat count of the current code
+ var max_count = 7; // max repeat count
+ var min_count = 4; // min repeat count
+
+ if (nextlen === 0) {
+ max_count = 138;
+ min_count = 3;
+ }
+ tree[(max_code + 1) * 2 + 1] = 0xffff; // guard
+
+ for (n = 0; n <= max_code; n++) {
+ curlen = nextlen;
+ nextlen = tree[(n + 1) * 2 + 1];
+ if (++count < max_count && curlen == nextlen) {
+ continue;
+ } else if (count < min_count) {
+ bl_tree[curlen * 2] += count;
+ } else if (curlen !== 0) {
+ if (curlen != prevlen)
+ bl_tree[curlen * 2]++;
+ bl_tree[REP_3_6 * 2]++;
+ } else if (count <= 10) {
+ bl_tree[REPZ_3_10 * 2]++;
+ } else {
+ bl_tree[REPZ_11_138 * 2]++;
+ }
+ count = 0;
+ prevlen = curlen;
+ if (nextlen === 0) {
+ max_count = 138;
+ min_count = 3;
+ } else if (curlen == nextlen) {
+ max_count = 6;
+ min_count = 3;
+ } else {
+ max_count = 7;
+ min_count = 4;
+ }
+ }
+ }
+
+ // Construct the Huffman tree for the bit lengths and return the index in
+ // bl_order of the last bit length code to send.
+ function build_bl_tree() {
+ var max_blindex; // index of last bit length code of non zero freq
+
+ // Determine the bit length frequencies for literal and distance trees
+ scan_tree(dyn_ltree, l_desc.max_code);
+ scan_tree(dyn_dtree, d_desc.max_code);
+
+ // Build the bit length tree:
+ bl_desc.build_tree(that);
+ // opt_len now includes the length of the tree representations, except
+ // the lengths of the bit lengths codes and the 5+5+4 bits for the
+ // counts.
+
+ // Determine the number of bit length codes to send. The pkzip format
+ // requires that at least 4 bit length codes be sent. (appnote.txt says
+ // 3 but the actual value used is 4.)
+ for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) {
+ if (bl_tree[Tree.bl_order[max_blindex] * 2 + 1] !== 0)
+ break;
+ }
+ // Update opt_len to include the bit length tree and counts
+ that.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;
+
+ return max_blindex;
+ }
+
+ // Output a byte on the stream.
+ // IN assertion: there is enough room in pending_buf.
+ function put_byte(p) {
+ that.pending_buf[that.pending++] = p;
+ }
+
+ function put_short(w) {
+ put_byte(w & 0xff);
+ put_byte((w >>> 8) & 0xff);
+ }
+
+ function putShortMSB(b) {
+ put_byte((b >> 8) & 0xff);
+ put_byte((b & 0xff) & 0xff);
+ }
+
+ function send_bits(value, length) {
+ var val, len = length;
+ if (bi_valid > Buf_size - len) {
+ val = value;
+ // bi_buf |= (val << bi_valid);
+ bi_buf |= ((val << bi_valid) & 0xffff);
+ put_short(bi_buf);
+ bi_buf = val >>> (Buf_size - bi_valid);
+ bi_valid += len - Buf_size;
+ } else {
+ // bi_buf |= (value) << bi_valid;
+ bi_buf |= (((value) << bi_valid) & 0xffff);
+ bi_valid += len;
+ }
+ }
+
+ function send_code(c, tree) {
+ var c2 = c * 2;
+ send_bits(tree[c2] & 0xffff, tree[c2 + 1] & 0xffff);
+ }
+
+ // Send a literal or distance tree in compressed form, using the codes in
+ // bl_tree.
+ function send_tree(tree,// the tree to be sent
+ max_code // and its largest code of non zero frequency
+ ) {
+ var n; // iterates over all tree elements
+ var prevlen = -1; // last emitted length
+ var curlen; // length of current code
+ var nextlen = tree[0 * 2 + 1]; // length of next code
+ var count = 0; // repeat count of the current code
+ var max_count = 7; // max repeat count
+ var min_count = 4; // min repeat count
+
+ if (nextlen === 0) {
+ max_count = 138;
+ min_count = 3;
+ }
+
+ for (n = 0; n <= max_code; n++) {
+ curlen = nextlen;
+ nextlen = tree[(n + 1) * 2 + 1];
+ if (++count < max_count && curlen == nextlen) {
+ continue;
+ } else if (count < min_count) {
+ do {
+ send_code(curlen, bl_tree);
+ } while (--count !== 0);
+ } else if (curlen !== 0) {
+ if (curlen != prevlen) {
+ send_code(curlen, bl_tree);
+ count--;
+ }
+ send_code(REP_3_6, bl_tree);
+ send_bits(count - 3, 2);
+ } else if (count <= 10) {
+ send_code(REPZ_3_10, bl_tree);
+ send_bits(count - 3, 3);
+ } else {
+ send_code(REPZ_11_138, bl_tree);
+ send_bits(count - 11, 7);
+ }
+ count = 0;
+ prevlen = curlen;
+ if (nextlen === 0) {
+ max_count = 138;
+ min_count = 3;
+ } else if (curlen == nextlen) {
+ max_count = 6;
+ min_count = 3;
+ } else {
+ max_count = 7;
+ min_count = 4;
+ }
+ }
+ }
+
+ // Send the header for a block using dynamic Huffman trees: the counts, the
+ // lengths of the bit length codes, the literal tree and the distance tree.
+ // IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
+ function send_all_trees(lcodes, dcodes, blcodes) {
+ var rank; // index in bl_order
+
+ send_bits(lcodes - 257, 5); // not +255 as stated in appnote.txt
+ send_bits(dcodes - 1, 5);
+ send_bits(blcodes - 4, 4); // not -3 as stated in appnote.txt
+ for (rank = 0; rank < blcodes; rank++) {
+ send_bits(bl_tree[Tree.bl_order[rank] * 2 + 1], 3);
+ }
+ send_tree(dyn_ltree, lcodes - 1); // literal tree
+ send_tree(dyn_dtree, dcodes - 1); // distance tree
+ }
+
+ // Flush the bit buffer, keeping at most 7 bits in it.
+ function bi_flush() {
+ if (bi_valid == 16) {
+ put_short(bi_buf);
+ bi_buf = 0;
+ bi_valid = 0;
+ } else if (bi_valid >= 8) {
+ put_byte(bi_buf & 0xff);
+ bi_buf >>>= 8;
+ bi_valid -= 8;
+ }
+ }
+
+ // Send one empty static block to give enough lookahead for inflate.
+ // This takes 10 bits, of which 7 may remain in the bit buffer.
+ // The current inflate code requires 9 bits of lookahead. If the
+ // last two codes for the previous block (real code plus EOB) were coded
+ // on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
+ // the last real code. In this case we send two empty static blocks instead
+ // of one. (There are no problems if the previous block is stored or fixed.)
+ // To simplify the code, we assume the worst case of last real code encoded
+ // on one bit only.
+ function _tr_align() {
+ send_bits(STATIC_TREES << 1, 3);
+ send_code(END_BLOCK, StaticTree.static_ltree);
+
+ bi_flush();
+
+ // Of the 10 bits for the empty block, we have already sent
+ // (10 - bi_valid) bits. The lookahead for the last real code (before
+ // the EOB of the previous block) was thus at least one plus the length
+ // of the EOB plus what we have just sent of the empty static block.
+ if (1 + last_eob_len + 10 - bi_valid < 9) {
+ send_bits(STATIC_TREES << 1, 3);
+ send_code(END_BLOCK, StaticTree.static_ltree);
+ bi_flush();
+ }
+ last_eob_len = 7;
+ }
+
+ // Save the match info and tally the frequency counts. Return true if
+ // the current block must be flushed.
+ function _tr_tally(dist, // distance of matched string
+ lc // match length-MIN_MATCH or unmatched char (if dist==0)
+ ) {
+ var out_length, in_length, dcode;
+ that.pending_buf[d_buf + last_lit * 2] = (dist >>> 8) & 0xff;
+ that.pending_buf[d_buf + last_lit * 2 + 1] = dist & 0xff;
+
+ that.pending_buf[l_buf + last_lit] = lc & 0xff;
+ last_lit++;
+
+ if (dist === 0) {
+ // lc is the unmatched char
+ dyn_ltree[lc * 2]++;
+ } else {
+ matches++;
+ // Here, lc is the match length - MIN_MATCH
+ dist--; // dist = match distance - 1
+ dyn_ltree[(Tree._length_code[lc] + LITERALS + 1) * 2]++;
+ dyn_dtree[Tree.d_code(dist) * 2]++;
+ }
+
+ if ((last_lit & 0x1fff) === 0 && level > 2) {
+ // Compute an upper bound for the compressed length
+ out_length = last_lit * 8;
+ in_length = strstart - block_start;
+ for (dcode = 0; dcode < D_CODES; dcode++) {
+ out_length += dyn_dtree[dcode * 2] * (5 + Tree.extra_dbits[dcode]);
+ }
+ out_length >>>= 3;
+ if ((matches < Math.floor(last_lit / 2)) && out_length < Math.floor(in_length / 2))
+ return true;
+ }
+
+ return (last_lit == lit_bufsize - 1);
+ // We avoid equality with lit_bufsize because of wraparound at 64K
+ // on 16 bit machines and because stored blocks are restricted to
+ // 64K-1 bytes.
+ }
+
+ // Send the block data compressed using the given Huffman trees
+ function compress_block(ltree, dtree) {
+ var dist; // distance of matched string
+ var lc; // match length or unmatched char (if dist === 0)
+ var lx = 0; // running index in l_buf
+ var code; // the code to send
+ var extra; // number of extra bits to send
+
+ if (last_lit !== 0) {
+ do {
+ dist = ((that.pending_buf[d_buf + lx * 2] << 8) & 0xff00) | (that.pending_buf[d_buf + lx * 2 + 1] & 0xff);
+ lc = (that.pending_buf[l_buf + lx]) & 0xff;
+ lx++;
+
+ if (dist === 0) {
+ send_code(lc, ltree); // send a literal byte
+ } else {
+ // Here, lc is the match length - MIN_MATCH
+ code = Tree._length_code[lc];
+
+ send_code(code + LITERALS + 1, ltree); // send the length
+ // code
+ extra = Tree.extra_lbits[code];
+ if (extra !== 0) {
+ lc -= Tree.base_length[code];
+ send_bits(lc, extra); // send the extra length bits
+ }
+ dist--; // dist is now the match distance - 1
+ code = Tree.d_code(dist);
+
+ send_code(code, dtree); // send the distance code
+ extra = Tree.extra_dbits[code];
+ if (extra !== 0) {
+ dist -= Tree.base_dist[code];
+ send_bits(dist, extra); // send the extra distance bits
+ }
+ } // literal or match pair ?
+
+ // Check that the overlay between pending_buf and d_buf+l_buf is
+ // ok:
+ } while (lx < last_lit);
+ }
+
+ send_code(END_BLOCK, ltree);
+ last_eob_len = ltree[END_BLOCK * 2 + 1];
+ }
+
+ // Flush the bit buffer and align the output on a byte boundary
+ function bi_windup() {
+ if (bi_valid > 8) {
+ put_short(bi_buf);
+ } else if (bi_valid > 0) {
+ put_byte(bi_buf & 0xff);
+ }
+ bi_buf = 0;
+ bi_valid = 0;
+ }
+
+ // Copy a stored block, storing first the length and its
+ // one's complement if requested.
+ function copy_block(buf, // the input data
+ len, // its length
+ header // true if block header must be written
+ ) {
+ bi_windup(); // align on byte boundary
+ last_eob_len = 8; // enough lookahead for inflate
+
+ if (header) {
+ put_short(len);
+ put_short(~len);
+ }
+
+ that.pending_buf.set(window.subarray(buf, buf + len), that.pending);
+ that.pending += len;
+ }
+
+ // Send a stored block
+ function _tr_stored_block(buf, // input block
+ stored_len, // length of input block
+ eof // true if this is the last block for a file
+ ) {
+ send_bits((STORED_BLOCK << 1) + (eof ? 1 : 0), 3); // send block type
+ copy_block(buf, stored_len, true); // with header
+ }
+
+ // Determine the best encoding for the current block: dynamic trees, static
+ // trees or store, and output the encoded block to the zip file.
+ function _tr_flush_block(buf, // input block, or NULL if too old
+ stored_len, // length of input block
+ eof // true if this is the last block for a file
+ ) {
+ var opt_lenb, static_lenb;// opt_len and static_len in bytes
+ var max_blindex = 0; // index of last bit length code of non zero freq
+
+ // Build the Huffman trees unless a stored block is forced
+ if (level > 0) {
+ // Construct the literal and distance trees
+ l_desc.build_tree(that);
+
+ d_desc.build_tree(that);
+
+ // At this point, opt_len and static_len are the total bit lengths
+ // of
+ // the compressed block data, excluding the tree representations.
+
+ // Build the bit length tree for the above two trees, and get the
+ // index
+ // in bl_order of the last bit length code to send.
+ max_blindex = build_bl_tree();
+
+ // Determine the best encoding. Compute first the block length in
+ // bytes
+ opt_lenb = (that.opt_len + 3 + 7) >>> 3;
+ static_lenb = (that.static_len + 3 + 7) >>> 3;
+
+ if (static_lenb <= opt_lenb)
+ opt_lenb = static_lenb;
+ } else {
+ opt_lenb = static_lenb = stored_len + 5; // force a stored block
+ }
+
+ if ((stored_len + 4 <= opt_lenb) && buf != -1) {
+ // 4: two words for the lengths
+ // The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
+ // Otherwise we can't have processed more than WSIZE input bytes
+ // since
+ // the last block flush, because compression would have been
+ // successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
+ // transform a block into a stored block.
+ _tr_stored_block(buf, stored_len, eof);
+ } else if (static_lenb == opt_lenb) {
+ send_bits((STATIC_TREES << 1) + (eof ? 1 : 0), 3);
+ compress_block(StaticTree.static_ltree, StaticTree.static_dtree);
+ } else {
+ send_bits((DYN_TREES << 1) + (eof ? 1 : 0), 3);
+ send_all_trees(l_desc.max_code + 1, d_desc.max_code + 1, max_blindex + 1);
+ compress_block(dyn_ltree, dyn_dtree);
+ }
+
+ // The above check is made mod 2^32, for files larger than 512 MB
+ // and uLong implemented on 32 bits.
+
+ init_block();
+
+ if (eof) {
+ bi_windup();
+ }
+ }
+
+ function flush_block_only(eof) {
+ _tr_flush_block(block_start >= 0 ? block_start : -1, strstart - block_start, eof);
+ block_start = strstart;
+ strm.flush_pending();
+ }
+
+ // Fill the window when the lookahead becomes insufficient.
+ // Updates strstart and lookahead.
+ //
+ // IN assertion: lookahead < MIN_LOOKAHEAD
+ // OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
+ // At least one byte has been read, or avail_in === 0; reads are
+ // performed for at least two bytes (required for the zip translate_eol
+ // option -- not supported here).
+ function fill_window() {
+ var n, m;
+ var p;
+ var more; // Amount of free space at the end of the window.
+
+ do {
+ more = (window_size - lookahead - strstart);
+
+ // Deal with !@#$% 64K limit:
+ if (more === 0 && strstart === 0 && lookahead === 0) {
+ more = w_size;
+ } else if (more == -1) {
+ // Very unlikely, but possible on 16 bit machine if strstart ==
+ // 0
+ // and lookahead == 1 (input done one byte at time)
+ more--;
+
+ // If the window is almost full and there is insufficient
+ // lookahead,
+ // move the upper half to the lower one to make room in the
+ // upper half.
+ } else if (strstart >= w_size + w_size - MIN_LOOKAHEAD) {
+ window.set(window.subarray(w_size, w_size + w_size), 0);
+
+ match_start -= w_size;
+ strstart -= w_size; // we now have strstart >= MAX_DIST
+ block_start -= w_size;
+
+ // Slide the hash table (could be avoided with 32 bit values
+ // at the expense of memory usage). We slide even when level ==
+ // 0
+ // to keep the hash table consistent if we switch back to level
+ // > 0
+ // later. (Using level 0 permanently is not an optimal usage of
+ // zlib, so we don't care about this pathological case.)
+
+ n = hash_size;
+ p = n;
+ do {
+ m = (head[--p] & 0xffff);
+ head[p] = (m >= w_size ? m - w_size : 0);
+ } while (--n !== 0);
+
+ n = w_size;
+ p = n;
+ do {
+ m = (prev[--p] & 0xffff);
+ prev[p] = (m >= w_size ? m - w_size : 0);
+ // If n is not on any hash chain, prev[n] is garbage but
+ // its value will never be used.
+ } while (--n !== 0);
+ more += w_size;
+ }
+
+ if (strm.avail_in === 0)
+ return;
+
+ // If there was no sliding:
+ // strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
+ // more == window_size - lookahead - strstart
+ // => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
+ // => more >= window_size - 2*WSIZE + 2
+ // In the BIG_MEM or MMAP case (not yet supported),
+ // window_size == input_size + MIN_LOOKAHEAD &&
+ // strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
+ // Otherwise, window_size == 2*WSIZE so more >= 2.
+ // If there was sliding, more >= WSIZE. So in all cases, more >= 2.
+
+ n = strm.read_buf(window, strstart + lookahead, more);
+ lookahead += n;
+
+ // Initialize the hash value now that we have some input:
+ if (lookahead >= MIN_MATCH) {
+ ins_h = window[strstart] & 0xff;
+ ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask;
+ }
+ // If the whole input has less than MIN_MATCH bytes, ins_h is
+ // garbage,
+ // but this is not important since only literal bytes will be
+ // emitted.
+ } while (lookahead < MIN_LOOKAHEAD && strm.avail_in !== 0);
+ }
+
+ // Copy without compression as much as possible from the input stream,
+ // return
+ // the current block state.
+ // This function does not insert new strings in the dictionary since
+ // uncompressible data is probably not useful. This function is used
+ // only for the level=0 compression option.
+ // NOTE: this function should be optimized to avoid extra copying from
+ // window to pending_buf.
+ function deflate_stored(flush) {
+ // Stored blocks are limited to 0xffff bytes, pending_buf is limited
+ // to pending_buf_size, and each stored block has a 5 byte header:
+
+ var max_block_size = 0xffff;
+ var max_start;
+
+ if (max_block_size > pending_buf_size - 5) {
+ max_block_size = pending_buf_size - 5;
+ }
+
+ // Copy as much as possible from input to output:
+ while (true) {
+ // Fill the window as much as possible:
+ if (lookahead <= 1) {
+ fill_window();
+ if (lookahead === 0 && flush == Z_NO_FLUSH)
+ return NeedMore;
+ if (lookahead === 0)
+ break; // flush the current block
+ }
+
+ strstart += lookahead;
+ lookahead = 0;
+
+ // Emit a stored block if pending_buf will be full:
+ max_start = block_start + max_block_size;
+ if (strstart === 0 || strstart >= max_start) {
+ // strstart === 0 is possible when wraparound on 16-bit machine
+ lookahead = (strstart - max_start);
+ strstart = max_start;
+
+ flush_block_only(false);
+ if (strm.avail_out === 0)
+ return NeedMore;
+
+ }
+
+ // Flush if we may have to slide, otherwise block_start may become
+ // negative and the data will be gone:
+ if (strstart - block_start >= w_size - MIN_LOOKAHEAD) {
+ flush_block_only(false);
+ if (strm.avail_out === 0)
+ return NeedMore;
+ }
+ }
+
+ flush_block_only(flush == Z_FINISH);
+ if (strm.avail_out === 0)
+ return (flush == Z_FINISH) ? FinishStarted : NeedMore;
+
+ return flush == Z_FINISH ? FinishDone : BlockDone;
+ }
+
+ function longest_match(cur_match) {
+ var chain_length = max_chain_length; // max hash chain length
+ var scan = strstart; // current string
+ var match; // matched string
+ var len; // length of current match
+ var best_len = prev_length; // best match length so far
+ var limit = strstart > (w_size - MIN_LOOKAHEAD) ? strstart - (w_size - MIN_LOOKAHEAD) : 0;
+ var _nice_match = nice_match;
+
+ // Stop when cur_match becomes <= limit. To simplify the code,
+ // we prevent matches with the string of window index 0.
+
+ var wmask = w_mask;
+
+ var strend = strstart + MAX_MATCH;
+ var scan_end1 = window[scan + best_len - 1];
+ var scan_end = window[scan + best_len];
+
+ // The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of
+ // 16.
+ // It is easy to get rid of this optimization if necessary.
+
+ // Do not waste too much time if we already have a good match:
+ if (prev_length >= good_match) {
+ chain_length >>= 2;
+ }
+
+ // Do not look for matches beyond the end of the input. This is
+ // necessary
+ // to make deflate deterministic.
+ if (_nice_match > lookahead)
+ _nice_match = lookahead;
+
+ do {
+ match = cur_match;
+
+ // Skip to next match if the match length cannot increase
+ // or if the match length is less than 2:
+ if (window[match + best_len] != scan_end || window[match + best_len - 1] != scan_end1 || window[match] != window[scan]
+ || window[++match] != window[scan + 1])
+ continue;
+
+ // The check at best_len-1 can be removed because it will be made
+ // again later. (This heuristic is not always a win.)
+ // It is not necessary to compare scan[2] and match[2] since they
+ // are always equal when the other bytes match, given that
+ // the hash keys are equal and that HASH_BITS >= 8.
+ scan += 2;
+ match++;
+
+ // We check for insufficient lookahead only every 8th comparison;
+ // the 256th check will be made at strstart+258.
+ do {
+ } while (window[++scan] == window[++match] && window[++scan] == window[++match] && window[++scan] == window[++match]
+ && window[++scan] == window[++match] && window[++scan] == window[++match] && window[++scan] == window[++match]
+ && window[++scan] == window[++match] && window[++scan] == window[++match] && scan < strend);
+
+ len = MAX_MATCH - (strend - scan);
+ scan = strend - MAX_MATCH;
+
+ if (len > best_len) {
+ match_start = cur_match;
+ best_len = len;
+ if (len >= _nice_match)
+ break;
+ scan_end1 = window[scan + best_len - 1];
+ scan_end = window[scan + best_len];
+ }
+
+ } while ((cur_match = (prev[cur_match & wmask] & 0xffff)) > limit && --chain_length !== 0);
+
+ if (best_len <= lookahead)
+ return best_len;
+ return lookahead;
+ }
+
+ // Compress as much as possible from the input stream, return the current
+ // block state.
+ // This function does not perform lazy evaluation of matches and inserts
+ // new strings in the dictionary only for unmatched strings or for short
+ // matches. It is used only for the fast compression options.
+ function deflate_fast(flush) {
+ // short hash_head = 0; // head of the hash chain
+ var hash_head = 0; // head of the hash chain
+ var bflush; // set if current block must be flushed
+
+ while (true) {
+ // Make sure that we always have enough lookahead, except
+ // at the end of the input file. We need MAX_MATCH bytes
+ // for the next match, plus MIN_MATCH bytes to insert the
+ // string following the next match.
+ if (lookahead < MIN_LOOKAHEAD) {
+ fill_window();
+ if (lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+ return NeedMore;
+ }
+ if (lookahead === 0)
+ break; // flush the current block
+ }
+
+ // Insert the string window[strstart .. strstart+2] in the
+ // dictionary, and set hash_head to the head of the hash chain:
+ if (lookahead >= MIN_MATCH) {
+ ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+
+ // prev[strstart&w_mask]=hash_head=head[ins_h];
+ hash_head = (head[ins_h] & 0xffff);
+ prev[strstart & w_mask] = head[ins_h];
+ head[ins_h] = strstart;
+ }
+
+ // Find the longest match, discarding those <= prev_length.
+ // At this point we have always match_length < MIN_MATCH
+
+ if (hash_head !== 0 && ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD) {
+ // To simplify the code, we prevent matches with the string
+ // of window index 0 (in particular we have to avoid a match
+ // of the string with itself at the start of the input file).
+ if (strategy != Z_HUFFMAN_ONLY) {
+ match_length = longest_match(hash_head);
+ }
+ // longest_match() sets match_start
+ }
+ if (match_length >= MIN_MATCH) {
+ // check_match(strstart, match_start, match_length);
+
+ bflush = _tr_tally(strstart - match_start, match_length - MIN_MATCH);
+
+ lookahead -= match_length;
+
+ // Insert new strings in the hash table only if the match length
+ // is not too large. This saves time but degrades compression.
+ if (match_length <= max_lazy_match && lookahead >= MIN_MATCH) {
+ match_length--; // string at strstart already in hash table
+ do {
+ strstart++;
+
+ ins_h = ((ins_h << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+ // prev[strstart&w_mask]=hash_head=head[ins_h];
+ hash_head = (head[ins_h] & 0xffff);
+ prev[strstart & w_mask] = head[ins_h];
+ head[ins_h] = strstart;
+
+ // strstart never exceeds WSIZE-MAX_MATCH, so there are
+ // always MIN_MATCH bytes ahead.
+ } while (--match_length !== 0);
+ strstart++;
+ } else {
+ strstart += match_length;
+ match_length = 0;
+ ins_h = window[strstart] & 0xff;
+
+ ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask;
+ // If lookahead < MIN_MATCH, ins_h is garbage, but it does
+ // not
+ // matter since it will be recomputed at next deflate call.
+ }
+ } else {
+ // No match, output a literal byte
+
+ bflush = _tr_tally(0, window[strstart] & 0xff);
+ lookahead--;
+ strstart++;
+ }
+ if (bflush) {
+
+ flush_block_only(false);
+ if (strm.avail_out === 0)
+ return NeedMore;
+ }
+ }
+
+ flush_block_only(flush == Z_FINISH);
+ if (strm.avail_out === 0) {
+ if (flush == Z_FINISH)
+ return FinishStarted;
+ else
+ return NeedMore;
+ }
+ return flush == Z_FINISH ? FinishDone : BlockDone;
+ }
+
+ // Same as above, but achieves better compression. We use a lazy
+ // evaluation for matches: a match is finally adopted only if there is
+ // no better match at the next window position.
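+  // Illustrative example (an assumption added for clarity, not from the upstream
+  // source): if the window already holds "xabcdybcdefz" and the new input is
+  // "abcdef", the match found at 'a' is "abcd" (length 4); one position later
+  // "bcdef" matches with length 5, so the shorter match is dropped, 'a' is
+  // emitted as a literal and the 5-byte match is used instead.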
+ function deflate_slow(flush) {
+ // short hash_head = 0; // head of hash chain
+ var hash_head = 0; // head of hash chain
+ var bflush; // set if current block must be flushed
+ var max_insert;
+
+ // Process the input block.
+ while (true) {
+ // Make sure that we always have enough lookahead, except
+ // at the end of the input file. We need MAX_MATCH bytes
+ // for the next match, plus MIN_MATCH bytes to insert the
+ // string following the next match.
+
+ if (lookahead < MIN_LOOKAHEAD) {
+ fill_window();
+ if (lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+ return NeedMore;
+ }
+ if (lookahead === 0)
+ break; // flush the current block
+ }
+
+ // Insert the string window[strstart .. strstart+2] in the
+ // dictionary, and set hash_head to the head of the hash chain:
+
+ if (lookahead >= MIN_MATCH) {
+ ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+ // prev[strstart&w_mask]=hash_head=head[ins_h];
+ hash_head = (head[ins_h] & 0xffff);
+ prev[strstart & w_mask] = head[ins_h];
+ head[ins_h] = strstart;
+ }
+
+ // Find the longest match, discarding those <= prev_length.
+ prev_length = match_length;
+ prev_match = match_start;
+ match_length = MIN_MATCH - 1;
+
+ if (hash_head !== 0 && prev_length < max_lazy_match && ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD) {
+ // To simplify the code, we prevent matches with the string
+ // of window index 0 (in particular we have to avoid a match
+ // of the string with itself at the start of the input file).
+
+ if (strategy != Z_HUFFMAN_ONLY) {
+ match_length = longest_match(hash_head);
+ }
+ // longest_match() sets match_start
+
+ if (match_length <= 5 && (strategy == Z_FILTERED || (match_length == MIN_MATCH && strstart - match_start > 4096))) {
+
+ // If prev_match is also MIN_MATCH, match_start is garbage
+ // but we will ignore the current match anyway.
+ match_length = MIN_MATCH - 1;
+ }
+ }
+
+ // If there was a match at the previous step and the current
+ // match is not better, output the previous match:
+ if (prev_length >= MIN_MATCH && match_length <= prev_length) {
+ max_insert = strstart + lookahead - MIN_MATCH;
+ // Do not insert strings in hash table beyond this.
+
+ // check_match(strstart-1, prev_match, prev_length);
+
+ bflush = _tr_tally(strstart - 1 - prev_match, prev_length - MIN_MATCH);
+
+ // Insert in hash table all strings up to the end of the match.
+ // strstart-1 and strstart are already inserted. If there is not
+ // enough lookahead, the last two strings are not inserted in
+ // the hash table.
+ lookahead -= prev_length - 1;
+ prev_length -= 2;
+ do {
+ if (++strstart <= max_insert) {
+ ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+ // prev[strstart&w_mask]=hash_head=head[ins_h];
+ hash_head = (head[ins_h] & 0xffff);
+ prev[strstart & w_mask] = head[ins_h];
+ head[ins_h] = strstart;
+ }
+ } while (--prev_length !== 0);
+ match_available = 0;
+ match_length = MIN_MATCH - 1;
+ strstart++;
+
+ if (bflush) {
+ flush_block_only(false);
+ if (strm.avail_out === 0)
+ return NeedMore;
+ }
+ } else if (match_available !== 0) {
+
+ // If there was no match at the previous position, output a
+ // single literal. If there was a match but the current match
+ // is longer, truncate the previous match to a single literal.
+
+ bflush = _tr_tally(0, window[strstart - 1] & 0xff);
+
+ if (bflush) {
+ flush_block_only(false);
+ }
+ strstart++;
+ lookahead--;
+ if (strm.avail_out === 0)
+ return NeedMore;
+ } else {
+ // There is no previous match to compare with, wait for
+ // the next step to decide.
+
+ match_available = 1;
+ strstart++;
+ lookahead--;
+ }
+ }
+
+ if (match_available !== 0) {
+ bflush = _tr_tally(0, window[strstart - 1] & 0xff);
+ match_available = 0;
+ }
+ flush_block_only(flush == Z_FINISH);
+
+ if (strm.avail_out === 0) {
+ if (flush == Z_FINISH)
+ return FinishStarted;
+ else
+ return NeedMore;
+ }
+
+ return flush == Z_FINISH ? FinishDone : BlockDone;
+ }
+
+ function deflateReset(strm) {
+ strm.total_in = strm.total_out = 0;
+ strm.msg = null; //
+
+ that.pending = 0;
+ that.pending_out = 0;
+
+ status = BUSY_STATE;
+
+ last_flush = Z_NO_FLUSH;
+
+ tr_init();
+ lm_init();
+ return Z_OK;
+ }
+
+ that.deflateInit = function(strm, _level, bits, _method, memLevel, _strategy) {
+ if (!_method)
+ _method = Z_DEFLATED;
+ if (!memLevel)
+ memLevel = DEF_MEM_LEVEL;
+ if (!_strategy)
+ _strategy = Z_DEFAULT_STRATEGY;
+
+ // byte[] my_version=ZLIB_VERSION;
+
+ //
+ // if (!version || version[0] != my_version[0]
+ // || stream_size != sizeof(z_stream)) {
+ // return Z_VERSION_ERROR;
+ // }
+
+ strm.msg = null;
+
+ if (_level == Z_DEFAULT_COMPRESSION)
+ _level = 6;
+
+ if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || _method != Z_DEFLATED || bits < 9 || bits > 15 || _level < 0 || _level > 9 || _strategy < 0
+ || _strategy > Z_HUFFMAN_ONLY) {
+ return Z_STREAM_ERROR;
+ }
+
+ strm.dstate = that;
+
+ w_bits = bits;
+ w_size = 1 << w_bits;
+ w_mask = w_size - 1;
+
+ hash_bits = memLevel + 7;
+ hash_size = 1 << hash_bits;
+ hash_mask = hash_size - 1;
+ hash_shift = Math.floor((hash_bits + MIN_MATCH - 1) / MIN_MATCH);
+
+ window = new Uint8Array(w_size * 2);
+ prev = [];
+ head = [];
+
+ lit_bufsize = 1 << (memLevel + 6); // 16K elements by default
+
+ // We overlay pending_buf and d_buf+l_buf. This works since the average
+ // output size for (length,distance) codes is <= 24 bits.
+ that.pending_buf = new Uint8Array(lit_bufsize * 4);
+ pending_buf_size = lit_bufsize * 4;
+
+ d_buf = Math.floor(lit_bufsize / 2);
+ l_buf = (1 + 2) * lit_bufsize;
+
+ level = _level;
+
+ strategy = _strategy;
+ method = _method & 0xff;
+
+ return deflateReset(strm);
+ };
+
+ that.deflateEnd = function() {
+ if (status != INIT_STATE && status != BUSY_STATE && status != FINISH_STATE) {
+ return Z_STREAM_ERROR;
+ }
+ // Deallocate in reverse order of allocations:
+ that.pending_buf = null;
+ head = null;
+ prev = null;
+ window = null;
+ // free
+ that.dstate = null;
+ return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
+ };
+
+ that.deflateParams = function(strm, _level, _strategy) {
+ var err = Z_OK;
+
+ if (_level == Z_DEFAULT_COMPRESSION) {
+ _level = 6;
+ }
+ if (_level < 0 || _level > 9 || _strategy < 0 || _strategy > Z_HUFFMAN_ONLY) {
+ return Z_STREAM_ERROR;
+ }
+
+ if (config_table[level].func != config_table[_level].func && strm.total_in !== 0) {
+ // Flush the last buffer:
+ err = strm.deflate(Z_PARTIAL_FLUSH);
+ }
+
+ if (level != _level) {
+ level = _level;
+ max_lazy_match = config_table[level].max_lazy;
+ good_match = config_table[level].good_length;
+ nice_match = config_table[level].nice_length;
+ max_chain_length = config_table[level].max_chain;
+ }
+ strategy = _strategy;
+ return err;
+ };
+
+ that.deflateSetDictionary = function(strm, dictionary, dictLength) {
+ var length = dictLength;
+ var n, index = 0;
+
+ if (!dictionary || status != INIT_STATE)
+ return Z_STREAM_ERROR;
+
+ if (length < MIN_MATCH)
+ return Z_OK;
+ if (length > w_size - MIN_LOOKAHEAD) {
+ length = w_size - MIN_LOOKAHEAD;
+ index = dictLength - length; // use the tail of the dictionary
+ }
+ window.set(dictionary.subarray(index, index + length), 0);
+
+ strstart = length;
+ block_start = length;
+
+ // Insert all strings in the hash table (except for the last two bytes).
+ // s->lookahead stays null, so s->ins_h will be recomputed at the next
+ // call of fill_window.
+
+ ins_h = window[0] & 0xff;
+ ins_h = (((ins_h) << hash_shift) ^ (window[1] & 0xff)) & hash_mask;
+
+ for (n = 0; n <= length - MIN_MATCH; n++) {
+ ins_h = (((ins_h) << hash_shift) ^ (window[(n) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+ prev[n & w_mask] = head[ins_h];
+ head[ins_h] = n;
+ }
+ return Z_OK;
+ };
+
+ that.deflate = function(_strm, flush) {
+ var i, header, level_flags, old_flush, bstate;
+
+ if (flush > Z_FINISH || flush < 0) {
+ return Z_STREAM_ERROR;
+ }
+
+ if (!_strm.next_out || (!_strm.next_in && _strm.avail_in !== 0) || (status == FINISH_STATE && flush != Z_FINISH)) {
+ _strm.msg = z_errmsg[Z_NEED_DICT - (Z_STREAM_ERROR)];
+ return Z_STREAM_ERROR;
+ }
+ if (_strm.avail_out === 0) {
+ _strm.msg = z_errmsg[Z_NEED_DICT - (Z_BUF_ERROR)];
+ return Z_BUF_ERROR;
+ }
+
+ strm = _strm; // just in case
+ old_flush = last_flush;
+ last_flush = flush;
+
+ // Write the zlib header
+ if (status == INIT_STATE) {
+ header = (Z_DEFLATED + ((w_bits - 8) << 4)) << 8;
+ level_flags = ((level - 1) & 0xff) >> 1;
+
+ if (level_flags > 3)
+ level_flags = 3;
+ header |= (level_flags << 6);
+ if (strstart !== 0)
+ header |= PRESET_DICT;
+ header += 31 - (header % 31);
+
+ status = BUSY_STATE;
+ putShortMSB(header);
+ }
+
+ // Flush as much pending output as possible
+ if (that.pending !== 0) {
+ strm.flush_pending();
+ if (strm.avail_out === 0) {
+ // console.log(" avail_out==0");
+ // Since avail_out is 0, deflate will be called again with
+ // more output space, but possibly with both pending and
+ // avail_in equal to zero. There won't be anything to do,
+ // but this is not an error situation so make sure we
+ // return OK instead of BUF_ERROR at next call of deflate:
+ last_flush = -1;
+ return Z_OK;
+ }
+
+ // Make sure there is something to do and avoid duplicate
+ // consecutive
+ // flushes. For repeated and useless calls with Z_FINISH, we keep
+ // returning Z_STREAM_END instead of Z_BUFF_ERROR.
+ } else if (strm.avail_in === 0 && flush <= old_flush && flush != Z_FINISH) {
+ strm.msg = z_errmsg[Z_NEED_DICT - (Z_BUF_ERROR)];
+ return Z_BUF_ERROR;
+ }
+
+ // User must not provide more input after the first FINISH:
+ if (status == FINISH_STATE && strm.avail_in !== 0) {
+ _strm.msg = z_errmsg[Z_NEED_DICT - (Z_BUF_ERROR)];
+ return Z_BUF_ERROR;
+ }
+
+ // Start a new block or continue the current one.
+ if (strm.avail_in !== 0 || lookahead !== 0 || (flush != Z_NO_FLUSH && status != FINISH_STATE)) {
+ bstate = -1;
+ switch (config_table[level].func) {
+ case STORED:
+ bstate = deflate_stored(flush);
+ break;
+ case FAST:
+ bstate = deflate_fast(flush);
+ break;
+ case SLOW:
+ bstate = deflate_slow(flush);
+ break;
+ default:
+ }
+
+ if (bstate == FinishStarted || bstate == FinishDone) {
+ status = FINISH_STATE;
+ }
+ if (bstate == NeedMore || bstate == FinishStarted) {
+ if (strm.avail_out === 0) {
+ last_flush = -1; // avoid BUF_ERROR next call, see above
+ }
+ return Z_OK;
+ // If flush != Z_NO_FLUSH && avail_out === 0, the next call
+ // of deflate should use the same flush parameter to make sure
+ // that the flush is complete. So we don't have to output an
+ // empty block here, this will be done at next call. This also
+ // ensures that for a very small output buffer, we emit at most
+ // one empty block.
+ }
+
+ if (bstate == BlockDone) {
+ if (flush == Z_PARTIAL_FLUSH) {
+ _tr_align();
+ } else { // FULL_FLUSH or SYNC_FLUSH
+ _tr_stored_block(0, 0, false);
+ // For a full flush, this empty block will be recognized
+ // as a special marker by inflate_sync().
+ if (flush == Z_FULL_FLUSH) {
+ // state.head[s.hash_size-1]=0;
+ for (i = 0; i < hash_size/*-1*/; i++)
+ // forget history
+ head[i] = 0;
+ }
+ }
+ strm.flush_pending();
+ if (strm.avail_out === 0) {
+ last_flush = -1; // avoid BUF_ERROR at next call, see above
+ return Z_OK;
+ }
+ }
+ }
+
+ if (flush != Z_FINISH)
+ return Z_OK;
+ return Z_STREAM_END;
+ };
+ }
+
+ // ZStream
+
+ function ZStream() {
+ var that = this;
+ that.next_in_index = 0;
+ that.next_out_index = 0;
+ // that.next_in; // next input byte
+ that.avail_in = 0; // number of bytes available at next_in
+ that.total_in = 0; // total nb of input bytes read so far
+ // that.next_out; // next output byte should be put there
+ that.avail_out = 0; // remaining free space at next_out
+ that.total_out = 0; // total nb of bytes output so far
+ // that.msg;
+ // that.dstate;
+ }
+
+ ZStream.prototype = {
+ deflateInit : function(level, bits) {
+ var that = this;
+ that.dstate = new Deflate();
+ if (!bits)
+ bits = MAX_BITS;
+ return that.dstate.deflateInit(that, level, bits);
+ },
+
+ deflate : function(flush) {
+ var that = this;
+ if (!that.dstate) {
+ return Z_STREAM_ERROR;
+ }
+ return that.dstate.deflate(that, flush);
+ },
+
+ deflateEnd : function() {
+ var that = this;
+ if (!that.dstate)
+ return Z_STREAM_ERROR;
+ var ret = that.dstate.deflateEnd();
+ that.dstate = null;
+ return ret;
+ },
+
+ deflateParams : function(level, strategy) {
+ var that = this;
+ if (!that.dstate)
+ return Z_STREAM_ERROR;
+ return that.dstate.deflateParams(that, level, strategy);
+ },
+
+ deflateSetDictionary : function(dictionary, dictLength) {
+ var that = this;
+ if (!that.dstate)
+ return Z_STREAM_ERROR;
+ return that.dstate.deflateSetDictionary(that, dictionary, dictLength);
+ },
+
+ // Read a new buffer from the current input stream, update the
+ // total number of bytes read. All deflate() input goes through
+ // this function so some applications may wish to modify it to avoid
+ // allocating a large strm->next_in buffer and copying from it.
+ // (See also flush_pending()).
+ read_buf : function(buf, start, size) {
+ var that = this;
+ var len = that.avail_in;
+ if (len > size)
+ len = size;
+ if (len === 0)
+ return 0;
+ that.avail_in -= len;
+ buf.set(that.next_in.subarray(that.next_in_index, that.next_in_index + len), start);
+ that.next_in_index += len;
+ that.total_in += len;
+ return len;
+ },
+
+ // Flush as much pending output as possible. All deflate() output goes
+ // through this function so some applications may wish to modify it
+ // to avoid allocating a large strm->next_out buffer and copying into it.
+ // (See also read_buf()).
+ flush_pending : function() {
+ var that = this;
+ var len = that.dstate.pending;
+
+ if (len > that.avail_out)
+ len = that.avail_out;
+ if (len === 0)
+ return;
+
+ // if (that.dstate.pending_buf.length <= that.dstate.pending_out || that.next_out.length <= that.next_out_index
+ // || that.dstate.pending_buf.length < (that.dstate.pending_out + len) || that.next_out.length < (that.next_out_index +
+ // len)) {
+ // console.log(that.dstate.pending_buf.length + ", " + that.dstate.pending_out + ", " + that.next_out.length + ", " +
+ // that.next_out_index + ", " + len);
+ // console.log("avail_out=" + that.avail_out);
+ // }
+
+ that.next_out.set(that.dstate.pending_buf.subarray(that.dstate.pending_out, that.dstate.pending_out + len), that.next_out_index);
+
+ that.next_out_index += len;
+ that.dstate.pending_out += len;
+ that.total_out += len;
+ that.avail_out -= len;
+ that.dstate.pending -= len;
+ if (that.dstate.pending === 0) {
+ that.dstate.pending_out = 0;
+ }
+ }
+ };
+
+ // Deflater
+
+ function Deflater(options) {
+ var that = this;
+ var z = new ZStream();
+ var bufsize = 512;
+ var flush = Z_NO_FLUSH;
+ var buf = new Uint8Array(bufsize);
+ var level = options ? options.level : Z_DEFAULT_COMPRESSION;
+ if (typeof level == "undefined")
+ level = Z_DEFAULT_COMPRESSION;
+ z.deflateInit(level);
+ z.next_out = buf;
+
+ that.append = function(data, onprogress) {
+ var err, buffers = [], lastIndex = 0, bufferIndex = 0, bufferSize = 0, array;
+ if (!data.length)
+ return;
+ z.next_in_index = 0;
+ z.next_in = data;
+ z.avail_in = data.length;
+ do {
+ z.next_out_index = 0;
+ z.avail_out = bufsize;
+ err = z.deflate(flush);
+ if (err != Z_OK)
+ throw new Error("deflating: " + z.msg);
+ if (z.next_out_index)
+ if (z.next_out_index == bufsize)
+ buffers.push(new Uint8Array(buf));
+ else
+ buffers.push(new Uint8Array(buf.subarray(0, z.next_out_index)));
+ bufferSize += z.next_out_index;
+ if (onprogress && z.next_in_index > 0 && z.next_in_index != lastIndex) {
+ onprogress(z.next_in_index);
+ lastIndex = z.next_in_index;
+ }
+ } while (z.avail_in > 0 || z.avail_out === 0);
+ array = new Uint8Array(bufferSize);
+ buffers.forEach(function(chunk) {
+ array.set(chunk, bufferIndex);
+ bufferIndex += chunk.length;
+ });
+ return array;
+ };
+ that.flush = function() {
+ var err, buffers = [], bufferIndex = 0, bufferSize = 0, array;
+ do {
+ z.next_out_index = 0;
+ z.avail_out = bufsize;
+ err = z.deflate(Z_FINISH);
+ if (err != Z_STREAM_END && err != Z_OK)
+ throw new Error("deflating: " + z.msg);
+ if (bufsize - z.avail_out > 0)
+ buffers.push(new Uint8Array(buf.subarray(0, z.next_out_index)));
+ bufferSize += z.next_out_index;
+ } while (z.avail_in > 0 || z.avail_out === 0);
+ z.deflateEnd();
+ array = new Uint8Array(bufferSize);
+ buffers.forEach(function(chunk) {
+ array.set(chunk, bufferIndex);
+ bufferIndex += chunk.length;
+ });
+ return array;
+ };
+ }
+
+ // 'zip' may not be defined in z-worker and some tests
+ var env = global.zip || global;
+ env.Deflater = env._jzlib_Deflater = Deflater;
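+
+  // Illustrative usage sketch (an assumption, not part of the upstream file):
+  // zip.js normally drives this from its worker scripts, but standalone use
+  // would look like:
+  //   var deflater = new Deflater({level: 6});
+  //   var body = deflater.append(chunk); // chunk: a non-empty Uint8Array of raw bytes
+  //   var tail = deflater.flush();       // emits the final block and releases state
+  //   // concatenating body and tail yields the complete compressed stream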
+})(this);
diff --git a/sync/vendor/zipjs/inflate.js b/sync/vendor/zipjs/inflate.js
new file mode 100755
index 00000000..7174050c
--- /dev/null
+++ b/sync/vendor/zipjs/inflate.js
@@ -0,0 +1,2155 @@
+/*
+ Copyright (c) 2013 Gildas Lormeau. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the distribution.
+
+ 3. The names of the authors may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+ INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+ OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This program is based on JZlib 1.0.2 ymnk, JCraft,Inc.
+ * JZlib is based on zlib-1.1.3, so all credit should go authors
+ * Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
+ * and contributors of zlib.
+ */
+
+(function(global) {
+ "use strict";
+
+ // Global
+ var MAX_BITS = 15;
+
+ var Z_OK = 0;
+ var Z_STREAM_END = 1;
+ var Z_NEED_DICT = 2;
+ var Z_STREAM_ERROR = -2;
+ var Z_DATA_ERROR = -3;
+ var Z_MEM_ERROR = -4;
+ var Z_BUF_ERROR = -5;
+
+ var inflate_mask = [ 0x00000000, 0x00000001, 0x00000003, 0x00000007, 0x0000000f, 0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff, 0x000001ff, 0x000003ff,
+ 0x000007ff, 0x00000fff, 0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff ];
+
+ var MANY = 1440;
+
+ // JZlib version : "1.0.2"
+ var Z_NO_FLUSH = 0;
+ var Z_FINISH = 4;
+
+ // InfTree
+ var fixed_bl = 9;
+ var fixed_bd = 5;
+
+ var fixed_tl = [ 96, 7, 256, 0, 8, 80, 0, 8, 16, 84, 8, 115, 82, 7, 31, 0, 8, 112, 0, 8, 48, 0, 9, 192, 80, 7, 10, 0, 8, 96, 0, 8, 32, 0, 9, 160, 0, 8, 0,
+ 0, 8, 128, 0, 8, 64, 0, 9, 224, 80, 7, 6, 0, 8, 88, 0, 8, 24, 0, 9, 144, 83, 7, 59, 0, 8, 120, 0, 8, 56, 0, 9, 208, 81, 7, 17, 0, 8, 104, 0, 8, 40,
+ 0, 9, 176, 0, 8, 8, 0, 8, 136, 0, 8, 72, 0, 9, 240, 80, 7, 4, 0, 8, 84, 0, 8, 20, 85, 8, 227, 83, 7, 43, 0, 8, 116, 0, 8, 52, 0, 9, 200, 81, 7, 13,
+ 0, 8, 100, 0, 8, 36, 0, 9, 168, 0, 8, 4, 0, 8, 132, 0, 8, 68, 0, 9, 232, 80, 7, 8, 0, 8, 92, 0, 8, 28, 0, 9, 152, 84, 7, 83, 0, 8, 124, 0, 8, 60,
+ 0, 9, 216, 82, 7, 23, 0, 8, 108, 0, 8, 44, 0, 9, 184, 0, 8, 12, 0, 8, 140, 0, 8, 76, 0, 9, 248, 80, 7, 3, 0, 8, 82, 0, 8, 18, 85, 8, 163, 83, 7,
+ 35, 0, 8, 114, 0, 8, 50, 0, 9, 196, 81, 7, 11, 0, 8, 98, 0, 8, 34, 0, 9, 164, 0, 8, 2, 0, 8, 130, 0, 8, 66, 0, 9, 228, 80, 7, 7, 0, 8, 90, 0, 8,
+ 26, 0, 9, 148, 84, 7, 67, 0, 8, 122, 0, 8, 58, 0, 9, 212, 82, 7, 19, 0, 8, 106, 0, 8, 42, 0, 9, 180, 0, 8, 10, 0, 8, 138, 0, 8, 74, 0, 9, 244, 80,
+ 7, 5, 0, 8, 86, 0, 8, 22, 192, 8, 0, 83, 7, 51, 0, 8, 118, 0, 8, 54, 0, 9, 204, 81, 7, 15, 0, 8, 102, 0, 8, 38, 0, 9, 172, 0, 8, 6, 0, 8, 134, 0,
+ 8, 70, 0, 9, 236, 80, 7, 9, 0, 8, 94, 0, 8, 30, 0, 9, 156, 84, 7, 99, 0, 8, 126, 0, 8, 62, 0, 9, 220, 82, 7, 27, 0, 8, 110, 0, 8, 46, 0, 9, 188, 0,
+ 8, 14, 0, 8, 142, 0, 8, 78, 0, 9, 252, 96, 7, 256, 0, 8, 81, 0, 8, 17, 85, 8, 131, 82, 7, 31, 0, 8, 113, 0, 8, 49, 0, 9, 194, 80, 7, 10, 0, 8, 97,
+ 0, 8, 33, 0, 9, 162, 0, 8, 1, 0, 8, 129, 0, 8, 65, 0, 9, 226, 80, 7, 6, 0, 8, 89, 0, 8, 25, 0, 9, 146, 83, 7, 59, 0, 8, 121, 0, 8, 57, 0, 9, 210,
+ 81, 7, 17, 0, 8, 105, 0, 8, 41, 0, 9, 178, 0, 8, 9, 0, 8, 137, 0, 8, 73, 0, 9, 242, 80, 7, 4, 0, 8, 85, 0, 8, 21, 80, 8, 258, 83, 7, 43, 0, 8, 117,
+ 0, 8, 53, 0, 9, 202, 81, 7, 13, 0, 8, 101, 0, 8, 37, 0, 9, 170, 0, 8, 5, 0, 8, 133, 0, 8, 69, 0, 9, 234, 80, 7, 8, 0, 8, 93, 0, 8, 29, 0, 9, 154,
+ 84, 7, 83, 0, 8, 125, 0, 8, 61, 0, 9, 218, 82, 7, 23, 0, 8, 109, 0, 8, 45, 0, 9, 186, 0, 8, 13, 0, 8, 141, 0, 8, 77, 0, 9, 250, 80, 7, 3, 0, 8, 83,
+ 0, 8, 19, 85, 8, 195, 83, 7, 35, 0, 8, 115, 0, 8, 51, 0, 9, 198, 81, 7, 11, 0, 8, 99, 0, 8, 35, 0, 9, 166, 0, 8, 3, 0, 8, 131, 0, 8, 67, 0, 9, 230,
+ 80, 7, 7, 0, 8, 91, 0, 8, 27, 0, 9, 150, 84, 7, 67, 0, 8, 123, 0, 8, 59, 0, 9, 214, 82, 7, 19, 0, 8, 107, 0, 8, 43, 0, 9, 182, 0, 8, 11, 0, 8, 139,
+ 0, 8, 75, 0, 9, 246, 80, 7, 5, 0, 8, 87, 0, 8, 23, 192, 8, 0, 83, 7, 51, 0, 8, 119, 0, 8, 55, 0, 9, 206, 81, 7, 15, 0, 8, 103, 0, 8, 39, 0, 9, 174,
+ 0, 8, 7, 0, 8, 135, 0, 8, 71, 0, 9, 238, 80, 7, 9, 0, 8, 95, 0, 8, 31, 0, 9, 158, 84, 7, 99, 0, 8, 127, 0, 8, 63, 0, 9, 222, 82, 7, 27, 0, 8, 111,
+ 0, 8, 47, 0, 9, 190, 0, 8, 15, 0, 8, 143, 0, 8, 79, 0, 9, 254, 96, 7, 256, 0, 8, 80, 0, 8, 16, 84, 8, 115, 82, 7, 31, 0, 8, 112, 0, 8, 48, 0, 9,
+ 193, 80, 7, 10, 0, 8, 96, 0, 8, 32, 0, 9, 161, 0, 8, 0, 0, 8, 128, 0, 8, 64, 0, 9, 225, 80, 7, 6, 0, 8, 88, 0, 8, 24, 0, 9, 145, 83, 7, 59, 0, 8,
+ 120, 0, 8, 56, 0, 9, 209, 81, 7, 17, 0, 8, 104, 0, 8, 40, 0, 9, 177, 0, 8, 8, 0, 8, 136, 0, 8, 72, 0, 9, 241, 80, 7, 4, 0, 8, 84, 0, 8, 20, 85, 8,
+ 227, 83, 7, 43, 0, 8, 116, 0, 8, 52, 0, 9, 201, 81, 7, 13, 0, 8, 100, 0, 8, 36, 0, 9, 169, 0, 8, 4, 0, 8, 132, 0, 8, 68, 0, 9, 233, 80, 7, 8, 0, 8,
+ 92, 0, 8, 28, 0, 9, 153, 84, 7, 83, 0, 8, 124, 0, 8, 60, 0, 9, 217, 82, 7, 23, 0, 8, 108, 0, 8, 44, 0, 9, 185, 0, 8, 12, 0, 8, 140, 0, 8, 76, 0, 9,
+ 249, 80, 7, 3, 0, 8, 82, 0, 8, 18, 85, 8, 163, 83, 7, 35, 0, 8, 114, 0, 8, 50, 0, 9, 197, 81, 7, 11, 0, 8, 98, 0, 8, 34, 0, 9, 165, 0, 8, 2, 0, 8,
+ 130, 0, 8, 66, 0, 9, 229, 80, 7, 7, 0, 8, 90, 0, 8, 26, 0, 9, 149, 84, 7, 67, 0, 8, 122, 0, 8, 58, 0, 9, 213, 82, 7, 19, 0, 8, 106, 0, 8, 42, 0, 9,
+ 181, 0, 8, 10, 0, 8, 138, 0, 8, 74, 0, 9, 245, 80, 7, 5, 0, 8, 86, 0, 8, 22, 192, 8, 0, 83, 7, 51, 0, 8, 118, 0, 8, 54, 0, 9, 205, 81, 7, 15, 0, 8,
+ 102, 0, 8, 38, 0, 9, 173, 0, 8, 6, 0, 8, 134, 0, 8, 70, 0, 9, 237, 80, 7, 9, 0, 8, 94, 0, 8, 30, 0, 9, 157, 84, 7, 99, 0, 8, 126, 0, 8, 62, 0, 9,
+ 221, 82, 7, 27, 0, 8, 110, 0, 8, 46, 0, 9, 189, 0, 8, 14, 0, 8, 142, 0, 8, 78, 0, 9, 253, 96, 7, 256, 0, 8, 81, 0, 8, 17, 85, 8, 131, 82, 7, 31, 0,
+ 8, 113, 0, 8, 49, 0, 9, 195, 80, 7, 10, 0, 8, 97, 0, 8, 33, 0, 9, 163, 0, 8, 1, 0, 8, 129, 0, 8, 65, 0, 9, 227, 80, 7, 6, 0, 8, 89, 0, 8, 25, 0, 9,
+ 147, 83, 7, 59, 0, 8, 121, 0, 8, 57, 0, 9, 211, 81, 7, 17, 0, 8, 105, 0, 8, 41, 0, 9, 179, 0, 8, 9, 0, 8, 137, 0, 8, 73, 0, 9, 243, 80, 7, 4, 0, 8,
+ 85, 0, 8, 21, 80, 8, 258, 83, 7, 43, 0, 8, 117, 0, 8, 53, 0, 9, 203, 81, 7, 13, 0, 8, 101, 0, 8, 37, 0, 9, 171, 0, 8, 5, 0, 8, 133, 0, 8, 69, 0, 9,
+ 235, 80, 7, 8, 0, 8, 93, 0, 8, 29, 0, 9, 155, 84, 7, 83, 0, 8, 125, 0, 8, 61, 0, 9, 219, 82, 7, 23, 0, 8, 109, 0, 8, 45, 0, 9, 187, 0, 8, 13, 0, 8,
+ 141, 0, 8, 77, 0, 9, 251, 80, 7, 3, 0, 8, 83, 0, 8, 19, 85, 8, 195, 83, 7, 35, 0, 8, 115, 0, 8, 51, 0, 9, 199, 81, 7, 11, 0, 8, 99, 0, 8, 35, 0, 9,
+ 167, 0, 8, 3, 0, 8, 131, 0, 8, 67, 0, 9, 231, 80, 7, 7, 0, 8, 91, 0, 8, 27, 0, 9, 151, 84, 7, 67, 0, 8, 123, 0, 8, 59, 0, 9, 215, 82, 7, 19, 0, 8,
+ 107, 0, 8, 43, 0, 9, 183, 0, 8, 11, 0, 8, 139, 0, 8, 75, 0, 9, 247, 80, 7, 5, 0, 8, 87, 0, 8, 23, 192, 8, 0, 83, 7, 51, 0, 8, 119, 0, 8, 55, 0, 9,
+ 207, 81, 7, 15, 0, 8, 103, 0, 8, 39, 0, 9, 175, 0, 8, 7, 0, 8, 135, 0, 8, 71, 0, 9, 239, 80, 7, 9, 0, 8, 95, 0, 8, 31, 0, 9, 159, 84, 7, 99, 0, 8,
+ 127, 0, 8, 63, 0, 9, 223, 82, 7, 27, 0, 8, 111, 0, 8, 47, 0, 9, 191, 0, 8, 15, 0, 8, 143, 0, 8, 79, 0, 9, 255 ];
+ var fixed_td = [ 80, 5, 1, 87, 5, 257, 83, 5, 17, 91, 5, 4097, 81, 5, 5, 89, 5, 1025, 85, 5, 65, 93, 5, 16385, 80, 5, 3, 88, 5, 513, 84, 5, 33, 92, 5,
+ 8193, 82, 5, 9, 90, 5, 2049, 86, 5, 129, 192, 5, 24577, 80, 5, 2, 87, 5, 385, 83, 5, 25, 91, 5, 6145, 81, 5, 7, 89, 5, 1537, 85, 5, 97, 93, 5,
+ 24577, 80, 5, 4, 88, 5, 769, 84, 5, 49, 92, 5, 12289, 82, 5, 13, 90, 5, 3073, 86, 5, 193, 192, 5, 24577 ];
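+
+  // Note on entry layout (as used by the fixed tables above and by huft_build
+  // and the decoders below): each Huffman table entry occupies three consecutive
+  // ints [op, bits, val]. op === 0 means val is a literal byte; op with bit 16
+  // set means val is a length/distance base and (op & 15) extra bits follow;
+  // op with bit 64 clear means val is the offset of a sub-table read with op
+  // more bits; op with bit 32 set marks end-of-block; anything else is an
+  // invalid code. "bits" is the number of input bits consumed by the entry.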
+
+ // Tables for deflate from PKZIP's appnote.txt.
+ var cplens = [ // Copy lengths for literal codes 257..285
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 ];
+
+ // see note #13 above about 258
+ var cplext = [ // Extra bits for literal codes 257..285
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112 // 112==invalid
+ ];
+
+ var cpdist = [ // Copy offsets for distance codes 0..29
+ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577 ];
+
+ var cpdext = [ // Extra bits for distance codes
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 ];
+
+ // If BMAX needs to be larger than 16, then h and x[] should be uLong.
+ var BMAX = 15; // maximum bit length of any code
+
+ function InfTree() {
+ var that = this;
+
+ var hn; // hufts used in space
+ var v; // work area for huft_build
+ var c; // bit length count table
+ var r; // table entry for structure assignment
+ var u; // table stack
+ var x; // bit offsets, then code stack
+
+ function huft_build(b, // code lengths in bits (all assumed <=
+ // BMAX)
+ bindex, n, // number of codes (assumed <= 288)
+ s, // number of simple-valued codes (0..s-1)
+ d, // list of base values for non-simple codes
+ e, // list of extra bits for non-simple codes
+ t, // result: starting table
+ m, // maximum lookup bits, returns actual
+ hp,// space for trees
+ hn,// hufts used in space
+ v // working area: values in order of bit length
+ ) {
+ // Given a list of code lengths and a maximum table size, make a set of
+ // tables to decode that set of codes. Return Z_OK on success,
+ // Z_BUF_ERROR
+ // if the given code set is incomplete (the tables are still built in
+ // this
+ // case), Z_DATA_ERROR if the input is invalid (an over-subscribed set
+ // of
+ // lengths), or Z_MEM_ERROR if not enough memory.
+
+ var a; // counter for codes of length k
+ var f; // i repeats in table every f entries
+ var g; // maximum code length
+ var h; // table level
+ var i; // counter, current code
+ var j; // counter
+ var k; // number of bits in current code
+ var l; // bits per table (returned in m)
+ var mask; // (1 << w) - 1, to avoid cc -O bug on HP
+ var p; // pointer into c[], b[], or v[]
+ var q; // points to current table
+ var w; // bits before this table == (l * h)
+ var xp; // pointer into x
+ var y; // number of dummy codes added
+ var z; // number of entries in current table
+
+ // Generate counts for each bit length
+
+ p = 0;
+ i = n;
+ do {
+ c[b[bindex + p]]++;
+ p++;
+ i--; // assume all entries <= BMAX
+ } while (i !== 0);
+
+ if (c[0] == n) { // null input--all zero length codes
+ t[0] = -1;
+ m[0] = 0;
+ return Z_OK;
+ }
+
+ // Find minimum and maximum length, bound *m by those
+ l = m[0];
+ for (j = 1; j <= BMAX; j++)
+ if (c[j] !== 0)
+ break;
+ k = j; // minimum code length
+ if (l < j) {
+ l = j;
+ }
+ for (i = BMAX; i !== 0; i--) {
+ if (c[i] !== 0)
+ break;
+ }
+ g = i; // maximum code length
+ if (l > i) {
+ l = i;
+ }
+ m[0] = l;
+
+ // Adjust last length count to fill out codes, if needed
+ for (y = 1 << j; j < i; j++, y <<= 1) {
+ if ((y -= c[j]) < 0) {
+ return Z_DATA_ERROR;
+ }
+ }
+ if ((y -= c[i]) < 0) {
+ return Z_DATA_ERROR;
+ }
+ c[i] += y;
+
+ // Generate starting offsets into the value table for each length
+ x[1] = j = 0;
+ p = 1;
+ xp = 2;
+ while (--i !== 0) { // note that i == g from above
+ x[xp] = (j += c[p]);
+ xp++;
+ p++;
+ }
+
+ // Make a table of values in order of bit lengths
+ i = 0;
+ p = 0;
+ do {
+ if ((j = b[bindex + p]) !== 0) {
+ v[x[j]++] = i;
+ }
+ p++;
+ } while (++i < n);
+ n = x[g]; // set n to length of v
+
+ // Generate the Huffman codes and for each, make the table entries
+ x[0] = i = 0; // first Huffman code is zero
+ p = 0; // grab values in bit order
+ h = -1; // no tables yet--level -1
+ w = -l; // bits decoded == (l * h)
+ u[0] = 0; // just to keep compilers happy
+ q = 0; // ditto
+ z = 0; // ditto
+
+ // go through the bit lengths (k already is bits in shortest code)
+ for (; k <= g; k++) {
+ a = c[k];
+ while (a-- !== 0) {
+ // here i is the Huffman code of length k bits for value *p
+ // make tables up to required level
+ while (k > w + l) {
+ h++;
+ w += l; // previous table always l bits
+ // compute minimum size table less than or equal to l bits
+ z = g - w;
+ z = (z > l) ? l : z; // table size upper limit
+ if ((f = 1 << (j = k - w)) > a + 1) { // try a k-w bit table
+ // too few codes for
+ // k-w bit table
+ f -= a + 1; // deduct codes from patterns left
+ xp = k;
+ if (j < z) {
+ while (++j < z) { // try smaller tables up to z bits
+ if ((f <<= 1) <= c[++xp])
+ break; // enough codes to use up j bits
+ f -= c[xp]; // else deduct codes from patterns
+ }
+ }
+ }
+ z = 1 << j; // table entries for j-bit table
+
+ // allocate new table
+ if (hn[0] + z > MANY) { // (note: doesn't matter for fixed)
+ return Z_DATA_ERROR; // overflow of MANY
+ }
+ u[h] = q = /* hp+ */hn[0]; // DEBUG
+ hn[0] += z;
+
+ // connect to last table, if there is one
+ if (h !== 0) {
+ x[h] = i; // save pattern for backing up
+ r[0] = /* (byte) */j; // bits in this table
+ r[1] = /* (byte) */l; // bits to dump before this table
+ j = i >>> (w - l);
+ r[2] = /* (int) */(q - u[h - 1] - j); // offset to this table
+ hp.set(r, (u[h - 1] + j) * 3);
+ // to
+ // last
+ // table
+ } else {
+ t[0] = q; // first table is returned result
+ }
+ }
+
+ // set up table entry in r
+ r[1] = /* (byte) */(k - w);
+ if (p >= n) {
+ r[0] = 128 + 64; // out of values--invalid code
+ } else if (v[p] < s) {
+ r[0] = /* (byte) */(v[p] < 256 ? 0 : 32 + 64); // 256 is
+ // end-of-block
+ r[2] = v[p++]; // simple code is just the value
+ } else {
+ r[0] = /* (byte) */(e[v[p] - s] + 16 + 64); // non-simple--look
+ // up in lists
+ r[2] = d[v[p++] - s];
+ }
+
+ // fill code-like entries with r
+ f = 1 << (k - w);
+ for (j = i >>> w; j < z; j += f) {
+ hp.set(r, (q + j) * 3);
+ }
+
+ // backwards increment the k-bit code i
+ for (j = 1 << (k - 1); (i & j) !== 0; j >>>= 1) {
+ i ^= j;
+ }
+ i ^= j;
+
+ // backup over finished tables
+ mask = (1 << w) - 1; // needed on HP, cc -O bug
+ while ((i & mask) != x[h]) {
+ h--; // don't need to update q
+ w -= l;
+ mask = (1 << w) - 1;
+ }
+ }
+ }
+ // Return Z_BUF_ERROR if we were given an incomplete table
+ return y !== 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
+ }
+
+ function initWorkArea(vsize) {
+ var i;
+ if (!hn) {
+ hn = []; // []; //new Array(1);
+ v = []; // new Array(vsize);
+ c = new Int32Array(BMAX + 1); // new Array(BMAX + 1);
+ r = []; // new Array(3);
+ u = new Int32Array(BMAX); // new Array(BMAX);
+ x = new Int32Array(BMAX + 1); // new Array(BMAX + 1);
+ }
+ if (v.length < vsize) {
+ v = []; // new Array(vsize);
+ }
+ for (i = 0; i < vsize; i++) {
+ v[i] = 0;
+ }
+ for (i = 0; i < BMAX + 1; i++) {
+ c[i] = 0;
+ }
+ for (i = 0; i < 3; i++) {
+ r[i] = 0;
+ }
+      if (result != Z_OK || (bd[0] === 0 && nl > 257)) {
+ if (result == Z_DATA_ERROR) {
+ z.msg = "oversubscribed distance tree";
+ } else if (result == Z_BUF_ERROR) {
+ z.msg = "incomplete distance tree";
+ result = Z_DATA_ERROR;
+ } else if (result != Z_MEM_ERROR) {
+ z.msg = "empty distance tree with lengths";
+ result = Z_DATA_ERROR;
+ }
+ return result;
+ }
+
+ return Z_OK;
+ };
+
+ }
+
+ InfTree.inflate_trees_fixed = function(bl, // literal desired/actual bit depth
+ bd, // distance desired/actual bit depth
+ tl,// literal/length tree result
+ td// distance tree result
+ ) {
+ bl[0] = fixed_bl;
+ bd[0] = fixed_bd;
+ tl[0] = fixed_tl;
+ td[0] = fixed_td;
+ return Z_OK;
+ };
+
+ // InfCodes
+
+ // waiting for "i:"=input,
+ // "o:"=output,
+ // "x:"=nothing
+ var START = 0; // x: set up for LEN
+ var LEN = 1; // i: get length/literal/eob next
+ var LENEXT = 2; // i: getting length extra (have base)
+ var DIST = 3; // i: get distance next
+ var DISTEXT = 4;// i: getting distance extra
+ var COPY = 5; // o: copying bytes in window, waiting
+ // for space
+ var LIT = 6; // o: got literal, waiting for output
+ // space
+ var WASH = 7; // o: got eob, possibly still output
+ // waiting
+ var END = 8; // x: got eob and all data flushed
+ var BADCODE = 9;// x: got error
+
+ function InfCodes() {
+ var that = this;
+
+ var mode; // current inflate_codes mode
+
+ // mode dependent information
+ var len = 0;
+
+ var tree; // pointer into tree
+ var tree_index = 0;
+ var need = 0; // bits needed
+
+ var lit = 0;
+
+ // if EXT or COPY, where and how much
+ var get = 0; // bits to get for extra
+ var dist = 0; // distance back to copy from
+
+ var lbits = 0; // ltree bits decoded per branch
+ var dbits = 0; // dtree bits decoder per branch
+ var ltree; // literal/length/eob tree
+ var ltree_index = 0; // literal/length/eob tree
+ var dtree; // distance tree
+ var dtree_index = 0; // distance tree
+
+ // Called with number of bytes left to write in window at least 258
+ // (the maximum string length) and number of input bytes available
+ // at least ten. The ten bytes are six bytes for the longest length/
+ // distance pair plus four bytes for overloading the bit buffer.
+
+ function inflate_fast(bl, bd, tl, tl_index, td, td_index, s, z) {
+ var t; // temporary pointer
+ var tp; // temporary pointer
+ var tp_index; // temporary pointer
+ var e; // extra bits or operation
+ var b; // bit buffer
+ var k; // bits in bit buffer
+ var p; // input data pointer
+ var n; // bytes available there
+ var q; // output window write pointer
+ var m; // bytes to end of window or read pointer
+ var ml; // mask for literal/length tree
+ var md; // mask for distance tree
+ var c; // bytes to copy
+ var d; // distance back to copy from
+ var r; // copy source pointer
+
+ var tp_index_t_3; // (tp_index+t)*3
+
+ // load input, output, bit values
+ p = z.next_in_index;
+ n = z.avail_in;
+ b = s.bitb;
+ k = s.bitk;
+ q = s.write;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+
+ // initialize masks
+ ml = inflate_mask[bl];
+ md = inflate_mask[bd];
+
+ // do until not enough input or output space for fast loop
+ do { // assume called with m >= 258 && n >= 10
+ // get literal/length code
+ while (k < (20)) { // max bits for literal/length code
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ t = b & ml;
+ tp = tl;
+ tp_index = tl_index;
+ tp_index_t_3 = (tp_index + t) * 3;
+ if ((e = tp[tp_index_t_3]) === 0) {
+ b >>= (tp[tp_index_t_3 + 1]);
+ k -= (tp[tp_index_t_3 + 1]);
+
+ s.window[q++] = /* (byte) */tp[tp_index_t_3 + 2];
+ m--;
+ continue;
+ }
+ do {
+
+ b >>= (tp[tp_index_t_3 + 1]);
+ k -= (tp[tp_index_t_3 + 1]);
+
+ if ((e & 16) !== 0) {
+ e &= 15;
+ c = tp[tp_index_t_3 + 2] + (/* (int) */b & inflate_mask[e]);
+
+ b >>= e;
+ k -= e;
+
+ // decode distance base of block to copy
+ while (k < (15)) { // max bits for distance code
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ t = b & md;
+ tp = td;
+ tp_index = td_index;
+ tp_index_t_3 = (tp_index + t) * 3;
+ e = tp[tp_index_t_3];
+
+ do {
+
+ b >>= (tp[tp_index_t_3 + 1]);
+ k -= (tp[tp_index_t_3 + 1]);
+
+ if ((e & 16) !== 0) {
+ // get extra bits to add to distance base
+ e &= 15;
+ while (k < (e)) { // get extra bits (up to 13)
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ d = tp[tp_index_t_3 + 2] + (b & inflate_mask[e]);
+
+ b >>= (e);
+ k -= (e);
+
+ // do the copy
+ m -= c;
+ if (q >= d) { // offset before dest
+ // just copy
+ r = q - d;
+ if (q - r > 0 && 2 > (q - r)) {
+ s.window[q++] = s.window[r++]; // minimum
+ // count is
+ // three,
+ s.window[q++] = s.window[r++]; // so unroll
+ // loop a
+ // little
+ c -= 2;
+ } else {
+ s.window.set(s.window.subarray(r, r + 2), q);
+ q += 2;
+ r += 2;
+ c -= 2;
+ }
+ } else { // else offset after destination
+ r = q - d;
+ do {
+ r += s.end; // force pointer in window
+ } while (r < 0); // covers invalid distances
+ e = s.end - r;
+ if (c > e) { // if source crosses,
+ c -= e; // wrapped copy
+ if (q - r > 0 && e > (q - r)) {
+ do {
+ s.window[q++] = s.window[r++];
+ } while (--e !== 0);
+ } else {
+ s.window.set(s.window.subarray(r, r + e), q);
+ q += e;
+ r += e;
+ e = 0;
+ }
+ r = 0; // copy rest from start of window
+ }
+
+ }
+
+ // copy all or what's left
+ if (q - r > 0 && c > (q - r)) {
+ do {
+ s.window[q++] = s.window[r++];
+ } while (--c !== 0);
+ } else {
+ s.window.set(s.window.subarray(r, r + c), q);
+ q += c;
+ r += c;
+ c = 0;
+ }
+ break;
+ } else if ((e & 64) === 0) {
+ t += tp[tp_index_t_3 + 2];
+ t += (b & inflate_mask[e]);
+ tp_index_t_3 = (tp_index + t) * 3;
+ e = tp[tp_index_t_3];
+ } else {
+ z.msg = "invalid distance code";
+
+ c = z.avail_in - n;
+ c = (k >> 3) < c ? k >> 3 : c;
+ n += c;
+ p -= c;
+ k -= c << 3;
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+
+ return Z_DATA_ERROR;
+ }
+ } while (true);
+ break;
+ }
+
+ if ((e & 64) === 0) {
+ t += tp[tp_index_t_3 + 2];
+ t += (b & inflate_mask[e]);
+ tp_index_t_3 = (tp_index + t) * 3;
+ if ((e = tp[tp_index_t_3]) === 0) {
+
+ b >>= (tp[tp_index_t_3 + 1]);
+ k -= (tp[tp_index_t_3 + 1]);
+
+ s.window[q++] = /* (byte) */tp[tp_index_t_3 + 2];
+ m--;
+ break;
+ }
+ } else if ((e & 32) !== 0) {
+
+ c = z.avail_in - n;
+ c = (k >> 3) < c ? k >> 3 : c;
+ n += c;
+ p -= c;
+ k -= c << 3;
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+
+ return Z_STREAM_END;
+ } else {
+ z.msg = "invalid literal/length code";
+
+ c = z.avail_in - n;
+ c = (k >> 3) < c ? k >> 3 : c;
+ n += c;
+ p -= c;
+ k -= c << 3;
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+
+ return Z_DATA_ERROR;
+ }
+ } while (true);
+ } while (m >= 258 && n >= 10);
+
+ // not enough input or output--restore pointers and return
+ c = z.avail_in - n;
+ c = (k >> 3) < c ? k >> 3 : c;
+ n += c;
+ p -= c;
+ k -= c << 3;
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+
+ return Z_OK;
+ }
+
+ that.init = function(bl, bd, tl, tl_index, td, td_index) {
+ mode = START;
+ lbits = /* (byte) */bl;
+ dbits = /* (byte) */bd;
+ ltree = tl;
+ ltree_index = tl_index;
+ dtree = td;
+ dtree_index = td_index;
+ tree = null;
+ };
+
+ that.proc = function(s, z, r) {
+ var j; // temporary storage
+ var tindex; // temporary pointer
+ var e; // extra bits or operation
+ var b = 0; // bit buffer
+ var k = 0; // bits in bit buffer
+ var p = 0; // input data pointer
+ var n; // bytes available there
+ var q; // output window write pointer
+ var m; // bytes to end of window or read pointer
+ var f; // pointer to copy strings from
+
+ // copy input/output information to locals (UPDATE macro restores)
+ p = z.next_in_index;
+ n = z.avail_in;
+ b = s.bitb;
+ k = s.bitk;
+ q = s.write;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+
+ // process input and output based on current state
+ while (true) {
+ switch (mode) {
+ // waiting for "i:"=input, "o:"=output, "x:"=nothing
+ case START: // x: set up for LEN
+ if (m >= 258 && n >= 10) {
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ r = inflate_fast(lbits, dbits, ltree, ltree_index, dtree, dtree_index, s, z);
+
+ p = z.next_in_index;
+ n = z.avail_in;
+ b = s.bitb;
+ k = s.bitk;
+ q = s.write;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+
+ if (r != Z_OK) {
+ mode = r == Z_STREAM_END ? WASH : BADCODE;
+ break;
+ }
+ }
+ need = lbits;
+ tree = ltree;
+ tree_index = ltree_index;
+
+ mode = LEN;
+ /* falls through */
+ case LEN: // i: get length/literal/eob next
+ j = need;
+
+ while (k < (j)) {
+ if (n !== 0)
+ r = Z_OK;
+ else {
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ tindex = (tree_index + (b & inflate_mask[j])) * 3;
+
+ b >>>= (tree[tindex + 1]);
+ k -= (tree[tindex + 1]);
+
+ e = tree[tindex];
+
+ if (e === 0) { // literal
+ lit = tree[tindex + 2];
+ mode = LIT;
+ break;
+ }
+ if ((e & 16) !== 0) { // length
+ get = e & 15;
+ len = tree[tindex + 2];
+ mode = LENEXT;
+ break;
+ }
+ if ((e & 64) === 0) { // next table
+ need = e;
+ tree_index = tindex / 3 + tree[tindex + 2];
+ break;
+ }
+ if ((e & 32) !== 0) { // end of block
+ mode = WASH;
+ break;
+ }
+ mode = BADCODE; // invalid code
+ z.msg = "invalid literal/length code";
+ r = Z_DATA_ERROR;
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+
+ case LENEXT: // i: getting length extra (have base)
+ j = get;
+
+ while (k < (j)) {
+ if (n !== 0)
+ r = Z_OK;
+ else {
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ len += (b & inflate_mask[j]);
+
+ b >>= j;
+ k -= j;
+
+ need = dbits;
+ tree = dtree;
+ tree_index = dtree_index;
+ mode = DIST;
+ /* falls through */
+ case DIST: // i: get distance next
+ j = need;
+
+ while (k < (j)) {
+ if (n !== 0)
+ r = Z_OK;
+ else {
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ tindex = (tree_index + (b & inflate_mask[j])) * 3;
+
+ b >>= tree[tindex + 1];
+ k -= tree[tindex + 1];
+
+ e = (tree[tindex]);
+ if ((e & 16) !== 0) { // distance
+ get = e & 15;
+ dist = tree[tindex + 2];
+ mode = DISTEXT;
+ break;
+ }
+ if ((e & 64) === 0) { // next table
+ need = e;
+ tree_index = tindex / 3 + tree[tindex + 2];
+ break;
+ }
+ mode = BADCODE; // invalid code
+ z.msg = "invalid distance code";
+ r = Z_DATA_ERROR;
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+
+ case DISTEXT: // i: getting distance extra
+ j = get;
+
+ while (k < (j)) {
+ if (n !== 0)
+ r = Z_OK;
+ else {
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ dist += (b & inflate_mask[j]);
+
+ b >>= j;
+ k -= j;
+
+ mode = COPY;
+ /* falls through */
+ case COPY: // o: copying bytes in window, waiting for space
+ f = q - dist;
+ while (f < 0) { // modulo window size-"while" instead
+ f += s.end; // of "if" handles invalid distances
+ }
+ while (len !== 0) {
+
+ if (m === 0) {
+ if (q == s.end && s.read !== 0) {
+ q = 0;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+ }
+ if (m === 0) {
+ s.write = q;
+ r = s.inflate_flush(z, r);
+ q = s.write;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+
+ if (q == s.end && s.read !== 0) {
+ q = 0;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+ }
+
+ if (m === 0) {
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+ }
+ }
+ }
+
+ s.window[q++] = s.window[f++];
+ m--;
+
+ if (f == s.end)
+ f = 0;
+ len--;
+ }
+ mode = START;
+ break;
+ case LIT: // o: got literal, waiting for output space
+ if (m === 0) {
+ if (q == s.end && s.read !== 0) {
+ q = 0;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+ }
+ if (m === 0) {
+ s.write = q;
+ r = s.inflate_flush(z, r);
+ q = s.write;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+
+ if (q == s.end && s.read !== 0) {
+ q = 0;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+ }
+ if (m === 0) {
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+ }
+ }
+ }
+ r = Z_OK;
+
+ s.window[q++] = /* (byte) */lit;
+ m--;
+
+ mode = START;
+ break;
+ case WASH: // o: got eob, possibly more output
+ if (k > 7) { // return unused byte, if any
+ k -= 8;
+ n++;
+ p--; // can always return one
+ }
+
+ s.write = q;
+ r = s.inflate_flush(z, r);
+ q = s.write;
+ m = q < s.read ? s.read - q - 1 : s.end - q;
+
+ if (s.read != s.write) {
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+ }
+ mode = END;
+ /* falls through */
+ case END:
+ r = Z_STREAM_END;
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+
+ case BADCODE: // x: got error
+
+ r = Z_DATA_ERROR;
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+
+ default:
+ r = Z_STREAM_ERROR;
+
+ s.bitb = b;
+ s.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ s.write = q;
+ return s.inflate_flush(z, r);
+ }
+ }
+ };
+
+ that.free = function() {
+ // ZFREE(z, c);
+ };
+
+ }
+
+ // InfBlocks
+
+ // Table for deflate from PKZIP's appnote.txt.
+ var border = [ // Order of the bit length code lengths
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ];
+
+ var TYPE = 0; // get type bits (3, including end bit)
+ var LENS = 1; // get lengths for stored
+ var STORED = 2;// processing stored block
+ var TABLE = 3; // get table lengths
+ var BTREE = 4; // get bit lengths tree for a dynamic
+ // block
+ var DTREE = 5; // get length, distance trees for a
+ // dynamic block
+ var CODES = 6; // processing fixed or dynamic block
+ var DRY = 7; // output remaining window bytes
+ var DONELOCKS = 8; // finished last block, done
+  var BADBLOCKS = 9; // got a data error--stuck here
+
+ function InfBlocks(z, w) {
+ var that = this;
+
+ var mode = TYPE; // current inflate_block mode
+
+ var left = 0; // if STORED, bytes left to copy
+
+ var table = 0; // table lengths (14 bits)
+ var index = 0; // index into blens (or border)
+ var blens; // bit lengths of codes
+ var bb = [ 0 ]; // bit length tree depth
+ var tb = [ 0 ]; // bit length decoding tree
+
+ var codes = new InfCodes(); // if CODES, current state
+
+ var last = 0; // true if this block is the last block
+
+ var hufts = new Int32Array(MANY * 3); // single malloc for tree space
+ var check = 0; // check on output
+ var inftree = new InfTree();
+
+ that.bitk = 0; // bits in bit buffer
+ that.bitb = 0; // bit buffer
+ that.window = new Uint8Array(w); // sliding window
+ that.end = w; // one byte after sliding window
+ that.read = 0; // window read pointer
+ that.write = 0; // window write pointer
+
+ that.reset = function(z, c) {
+ if (c)
+ c[0] = check;
+ // if (mode == BTREE || mode == DTREE) {
+ // }
+ if (mode == CODES) {
+ codes.free(z);
+ }
+ mode = TYPE;
+ that.bitk = 0;
+ that.bitb = 0;
+ that.read = that.write = 0;
+ };
+
+ that.reset(z, null);
+
+ // copy as much as possible from the sliding window to the output area
+ that.inflate_flush = function(z, r) {
+ var n;
+ var p;
+ var q;
+
+ // local copies of source and destination pointers
+ p = z.next_out_index;
+ q = that.read;
+
+ // compute number of bytes to copy as far as end of window
+ n = /* (int) */((q <= that.write ? that.write : that.end) - q);
+ if (n > z.avail_out)
+ n = z.avail_out;
+ if (n !== 0 && r == Z_BUF_ERROR)
+ r = Z_OK;
+
+ // update counters
+ z.avail_out -= n;
+ z.total_out += n;
+
+ // copy as far as end of window
+ z.next_out.set(that.window.subarray(q, q + n), p);
+ p += n;
+ q += n;
+
+ // see if more to copy at beginning of window
+ if (q == that.end) {
+ // wrap pointers
+ q = 0;
+ if (that.write == that.end)
+ that.write = 0;
+
+ // compute bytes to copy
+ n = that.write - q;
+ if (n > z.avail_out)
+ n = z.avail_out;
+ if (n !== 0 && r == Z_BUF_ERROR)
+ r = Z_OK;
+
+ // update counters
+ z.avail_out -= n;
+ z.total_out += n;
+
+ // copy
+ z.next_out.set(that.window.subarray(q, q + n), p);
+ p += n;
+ q += n;
+ }
+
+ // update pointers
+ z.next_out_index = p;
+ that.read = q;
+
+ // done
+ return r;
+ };
+
+ that.proc = function(z, r) {
+ var t; // temporary storage
+ var b; // bit buffer
+ var k; // bits in bit buffer
+ var p; // input data pointer
+ var n; // bytes available there
+ var q; // output window write pointer
+ var m; // bytes to end of window or read pointer
+
+ var i;
+
+ // copy input/output information to locals (UPDATE macro restores)
+ // {
+ p = z.next_in_index;
+ n = z.avail_in;
+ b = that.bitb;
+ k = that.bitk;
+ // }
+ // {
+ q = that.write;
+ m = /* (int) */(q < that.read ? that.read - q - 1 : that.end - q);
+ // }
+
+ // process input based on current state
+ // DEBUG dtree
+ while (true) {
+ switch (mode) {
+ case TYPE:
+
+ while (k < (3)) {
+ if (n !== 0) {
+ r = Z_OK;
+ } else {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+ t = /* (int) */(b & 7);
+ last = t & 1;
+
+ switch (t >>> 1) {
+ case 0: // stored
+ // {
+ b >>>= (3);
+ k -= (3);
+ // }
+ t = k & 7; // go to byte boundary
+
+ // {
+ b >>>= (t);
+ k -= (t);
+ // }
+ mode = LENS; // get length of stored block
+ break;
+ case 1: // fixed
+ // {
+ var bl = []; // new Array(1);
+ var bd = []; // new Array(1);
+ var tl = [ [] ]; // new Array(1);
+ var td = [ [] ]; // new Array(1);
+
+ InfTree.inflate_trees_fixed(bl, bd, tl, td);
+ codes.init(bl[0], bd[0], tl[0], 0, td[0], 0);
+ // }
+
+ // {
+ b >>>= (3);
+ k -= (3);
+ // }
+
+ mode = CODES;
+ break;
+ case 2: // dynamic
+
+ // {
+ b >>>= (3);
+ k -= (3);
+ // }
+
+ mode = TABLE;
+ break;
+ case 3: // illegal
+
+ // {
+ b >>>= (3);
+ k -= (3);
+ // }
+ mode = BADBLOCKS;
+ z.msg = "invalid block type";
+ r = Z_DATA_ERROR;
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ break;
+ case LENS:
+
+ while (k < (32)) {
+ if (n !== 0) {
+ r = Z_OK;
+ } else {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ if ((((~b) >>> 16) & 0xffff) != (b & 0xffff)) {
+ mode = BADBLOCKS;
+ z.msg = "invalid stored block lengths";
+ r = Z_DATA_ERROR;
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ left = (b & 0xffff);
+ b = k = 0; // dump bits
+ mode = left !== 0 ? STORED : (last !== 0 ? DRY : TYPE);
+ break;
+ case STORED:
+ if (n === 0) {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+
+ if (m === 0) {
+ if (q == that.end && that.read !== 0) {
+ q = 0;
+ m = /* (int) */(q < that.read ? that.read - q - 1 : that.end - q);
+ }
+ if (m === 0) {
+ that.write = q;
+ r = that.inflate_flush(z, r);
+ q = that.write;
+ m = /* (int) */(q < that.read ? that.read - q - 1 : that.end - q);
+ if (q == that.end && that.read !== 0) {
+ q = 0;
+ m = /* (int) */(q < that.read ? that.read - q - 1 : that.end - q);
+ }
+ if (m === 0) {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ }
+ }
+ r = Z_OK;
+
+ t = left;
+ if (t > n)
+ t = n;
+ if (t > m)
+ t = m;
+ that.window.set(z.read_buf(p, t), q);
+ p += t;
+ n -= t;
+ q += t;
+ m -= t;
+ if ((left -= t) !== 0)
+ break;
+ mode = last !== 0 ? DRY : TYPE;
+ break;
+ case TABLE:
+
+ while (k < (14)) {
+ if (n !== 0) {
+ r = Z_OK;
+ } else {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ table = t = (b & 0x3fff);
+ if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29) {
+ mode = BADBLOCKS;
+ z.msg = "too many length or distance symbols";
+ r = Z_DATA_ERROR;
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
+ if (!blens || blens.length < t) {
+ blens = []; // new Array(t);
+ } else {
+ for (i = 0; i < t; i++) {
+ blens[i] = 0;
+ }
+ }
+
+ // {
+ b >>>= (14);
+ k -= (14);
+ // }
+
+ index = 0;
+ mode = BTREE;
+ /* falls through */
+ case BTREE:
+ while (index < 4 + (table >>> 10)) {
+ while (k < (3)) {
+ if (n !== 0) {
+ r = Z_OK;
+ } else {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ blens[border[index++]] = b & 7;
+
+ // {
+ b >>>= (3);
+ k -= (3);
+ // }
+ }
+
+ while (index < 19) {
+ blens[border[index++]] = 0;
+ }
+
+ bb[0] = 7;
+ t = inftree.inflate_trees_bits(blens, bb, tb, hufts, z);
+ if (t != Z_OK) {
+ r = t;
+ if (r == Z_DATA_ERROR) {
+ blens = null;
+ mode = BADBLOCKS;
+ }
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+
+ index = 0;
+ mode = DTREE;
+ /* falls through */
+ case DTREE:
+ while (true) {
+ t = table;
+ if (index >= 258 + (t & 0x1f) + ((t >> 5) & 0x1f)) {
+ break;
+ }
+
+ var j, c;
+
+ t = bb[0];
+
+ while (k < (t)) {
+ if (n !== 0) {
+ r = Z_OK;
+ } else {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ // if (tb[0] == -1) {
+ // System.err.println("null...");
+ // }
+
+ t = hufts[(tb[0] + (b & inflate_mask[t])) * 3 + 1];
+ c = hufts[(tb[0] + (b & inflate_mask[t])) * 3 + 2];
+
+ if (c < 16) {
+ b >>>= (t);
+ k -= (t);
+ blens[index++] = c;
+ } else { // c == 16..18
+ i = c == 18 ? 7 : c - 14;
+ j = c == 18 ? 11 : 3;
+
+ while (k < (t + i)) {
+ if (n !== 0) {
+ r = Z_OK;
+ } else {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ n--;
+ b |= (z.read_byte(p++) & 0xff) << k;
+ k += 8;
+ }
+
+ b >>>= (t);
+ k -= (t);
+
+ j += (b & inflate_mask[i]);
+
+ b >>>= (i);
+ k -= (i);
+
+ i = index;
+ t = table;
+ if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) || (c == 16 && i < 1)) {
+ blens = null;
+ mode = BADBLOCKS;
+ z.msg = "invalid bit length repeat";
+ r = Z_DATA_ERROR;
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+
+ c = c == 16 ? blens[i - 1] : 0;
+ do {
+ blens[i++] = c;
+ } while (--j !== 0);
+ index = i;
+ }
+ }
+
+ tb[0] = -1;
+ // {
+ var bl_ = []; // new Array(1);
+ var bd_ = []; // new Array(1);
+ var tl_ = []; // new Array(1);
+ var td_ = []; // new Array(1);
+ bl_[0] = 9; // must be <= 9 for lookahead assumptions
+ bd_[0] = 6; // must be <= 9 for lookahead assumptions
+
+ t = table;
+ t = inftree.inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f), blens, bl_, bd_, tl_, td_, hufts, z);
+
+ if (t != Z_OK) {
+ if (t == Z_DATA_ERROR) {
+ blens = null;
+ mode = BADBLOCKS;
+ }
+ r = t;
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ codes.init(bl_[0], bd_[0], hufts, tl_[0], hufts, td_[0]);
+ // }
+ mode = CODES;
+ /* falls through */
+ case CODES:
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+
+ if ((r = codes.proc(that, z, r)) != Z_STREAM_END) {
+ return that.inflate_flush(z, r);
+ }
+ r = Z_OK;
+ codes.free(z);
+
+ p = z.next_in_index;
+ n = z.avail_in;
+ b = that.bitb;
+ k = that.bitk;
+ q = that.write;
+ m = /* (int) */(q < that.read ? that.read - q - 1 : that.end - q);
+
+ if (last === 0) {
+ mode = TYPE;
+ break;
+ }
+ mode = DRY;
+ /* falls through */
+ case DRY:
+ that.write = q;
+ r = that.inflate_flush(z, r);
+ q = that.write;
+ m = /* (int) */(q < that.read ? that.read - q - 1 : that.end - q);
+ if (that.read != that.write) {
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ mode = DONELOCKS;
+ /* falls through */
+ case DONELOCKS:
+ r = Z_STREAM_END;
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ case BADBLOCKS:
+ r = Z_DATA_ERROR;
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+
+ default:
+ r = Z_STREAM_ERROR;
+
+ that.bitb = b;
+ that.bitk = k;
+ z.avail_in = n;
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ that.write = q;
+ return that.inflate_flush(z, r);
+ }
+ }
+ };
+
+ that.free = function(z) {
+ that.reset(z, null);
+ that.window = null;
+ hufts = null;
+ // ZFREE(z, s);
+ };
+
+ that.set_dictionary = function(d, start, n) {
+ that.window.set(d.subarray(start, start + n), 0);
+ that.read = that.write = n;
+ };
+
+ // Returns true if inflate is currently at the end of a block generated
+ // by Z_SYNC_FLUSH or Z_FULL_FLUSH.
+ that.sync_point = function() {
+ return mode == LENS ? 1 : 0;
+ };
+
+ }
+
+ // Inflate
+
+ // preset dictionary flag in zlib header
+ var PRESET_DICT = 0x20;
+
+ var Z_DEFLATED = 8;
+
+ var METHOD = 0; // waiting for method byte
+ var FLAG = 1; // waiting for flag byte
+ var DICT4 = 2; // four dictionary check bytes to go
+ var DICT3 = 3; // three dictionary check bytes to go
+ var DICT2 = 4; // two dictionary check bytes to go
+ var DICT1 = 5; // one dictionary check byte to go
+ var DICT0 = 6; // waiting for inflateSetDictionary
+ var BLOCKS = 7; // decompressing blocks
+ var DONE = 12; // finished check, done
+ var BAD = 13; // got an error--stay here
+
+ var mark = [ 0, 0, 0xff, 0xff ];
+
+ function Inflate() {
+ var that = this;
+
+ that.mode = 0; // current inflate mode
+
+ // mode dependent information
+ that.method = 0; // if FLAGS, method byte
+
+ // if CHECK, check values to compare
+ that.was = [ 0 ]; // new Array(1); // computed check value
+ that.need = 0; // stream check value
+
+ // if BAD, inflateSync's marker bytes count
+ that.marker = 0;
+
+ // mode independent information
+ that.wbits = 0; // log2(window size) (8..15, defaults to 15)
+
+ // this.blocks; // current inflate_blocks state
+
+ function inflateReset(z) {
+ if (!z || !z.istate)
+ return Z_STREAM_ERROR;
+
+ z.total_in = z.total_out = 0;
+ z.msg = null;
+ z.istate.mode = BLOCKS;
+ z.istate.blocks.reset(z, null);
+ return Z_OK;
+ }
+
+ that.inflateEnd = function(z) {
+ if (that.blocks)
+ that.blocks.free(z);
+ that.blocks = null;
+ // ZFREE(z, z->state);
+ return Z_OK;
+ };
+
+ that.inflateInit = function(z, w) {
+ z.msg = null;
+ that.blocks = null;
+
+ // set window size
+ if (w < 8 || w > 15) {
+ that.inflateEnd(z);
+ return Z_STREAM_ERROR;
+ }
+ that.wbits = w;
+
+ z.istate.blocks = new InfBlocks(z, 1 << w);
+
+ // reset state
+ inflateReset(z);
+ return Z_OK;
+ };
+
+ that.inflate = function(z, f) {
+ var r;
+ var b;
+
+ if (!z || !z.istate || !z.next_in)
+ return Z_STREAM_ERROR;
+ f = f == Z_FINISH ? Z_BUF_ERROR : Z_OK;
+ r = Z_BUF_ERROR;
+ while (true) {
+ // System.out.println("mode: "+z.istate.mode);
+ switch (z.istate.mode) {
+ case METHOD:
+
+ if (z.avail_in === 0)
+ return r;
+ r = f;
+
+ z.avail_in--;
+ z.total_in++;
+ if (((z.istate.method = z.read_byte(z.next_in_index++)) & 0xf) != Z_DEFLATED) {
+ z.istate.mode = BAD;
+ z.msg = "unknown compression method";
+ z.istate.marker = 5; // can't try inflateSync
+ break;
+ }
+ if ((z.istate.method >> 4) + 8 > z.istate.wbits) {
+ z.istate.mode = BAD;
+ z.msg = "invalid window size";
+ z.istate.marker = 5; // can't try inflateSync
+ break;
+ }
+ z.istate.mode = FLAG;
+ /* falls through */
+ case FLAG:
+
+ if (z.avail_in === 0)
+ return r;
+ r = f;
+
+ z.avail_in--;
+ z.total_in++;
+ b = (z.read_byte(z.next_in_index++)) & 0xff;
+
+ if ((((z.istate.method << 8) + b) % 31) !== 0) {
+ z.istate.mode = BAD;
+ z.msg = "incorrect header check";
+ z.istate.marker = 5; // can't try inflateSync
+ break;
+ }
+
+ if ((b & PRESET_DICT) === 0) {
+ z.istate.mode = BLOCKS;
+ break;
+ }
+ z.istate.mode = DICT4;
+ /* falls through */
+ case DICT4:
+
+ if (z.avail_in === 0)
+ return r;
+ r = f;
+
+ z.avail_in--;
+ z.total_in++;
+ z.istate.need = ((z.read_byte(z.next_in_index++) & 0xff) << 24) & 0xff000000;
+ z.istate.mode = DICT3;
+ /* falls through */
+ case DICT3:
+
+ if (z.avail_in === 0)
+ return r;
+ r = f;
+
+ z.avail_in--;
+ z.total_in++;
+ z.istate.need += ((z.read_byte(z.next_in_index++) & 0xff) << 16) & 0xff0000;
+ z.istate.mode = DICT2;
+ /* falls through */
+ case DICT2:
+
+ if (z.avail_in === 0)
+ return r;
+ r = f;
+
+ z.avail_in--;
+ z.total_in++;
+ z.istate.need += ((z.read_byte(z.next_in_index++) & 0xff) << 8) & 0xff00;
+ z.istate.mode = DICT1;
+ /* falls through */
+ case DICT1:
+
+ if (z.avail_in === 0)
+ return r;
+ r = f;
+
+ z.avail_in--;
+ z.total_in++;
+ z.istate.need += (z.read_byte(z.next_in_index++) & 0xff);
+ z.istate.mode = DICT0;
+ return Z_NEED_DICT;
+ case DICT0:
+ z.istate.mode = BAD;
+ z.msg = "need dictionary";
+ z.istate.marker = 0; // can try inflateSync
+ return Z_STREAM_ERROR;
+ case BLOCKS:
+
+ r = z.istate.blocks.proc(z, r);
+ if (r == Z_DATA_ERROR) {
+ z.istate.mode = BAD;
+ z.istate.marker = 0; // can try inflateSync
+ break;
+ }
+ if (r == Z_OK) {
+ r = f;
+ }
+ if (r != Z_STREAM_END) {
+ return r;
+ }
+ r = f;
+ z.istate.blocks.reset(z, z.istate.was);
+ z.istate.mode = DONE;
+ /* falls through */
+ case DONE:
+ return Z_STREAM_END;
+ case BAD:
+ return Z_DATA_ERROR;
+ default:
+ return Z_STREAM_ERROR;
+ }
+ }
+ };
+
+ that.inflateSetDictionary = function(z, dictionary, dictLength) {
+ var index = 0;
+ var length = dictLength;
+ if (!z || !z.istate || z.istate.mode != DICT0)
+ return Z_STREAM_ERROR;
+
+ if (length >= (1 << z.istate.wbits)) {
+ length = (1 << z.istate.wbits) - 1;
+ index = dictLength - length;
+ }
+ z.istate.blocks.set_dictionary(dictionary, index, length);
+ z.istate.mode = BLOCKS;
+ return Z_OK;
+ };
+
+ that.inflateSync = function(z) {
+ var n; // number of bytes to look at
+ var p; // pointer to bytes
+ var m; // number of marker bytes found in a row
+ var r, w; // temporaries to save total_in and total_out
+
+ // set up
+ if (!z || !z.istate)
+ return Z_STREAM_ERROR;
+ if (z.istate.mode != BAD) {
+ z.istate.mode = BAD;
+ z.istate.marker = 0;
+ }
+ if ((n = z.avail_in) === 0)
+ return Z_BUF_ERROR;
+ p = z.next_in_index;
+ m = z.istate.marker;
+
+ // search
+ while (n !== 0 && m < 4) {
+ if (z.read_byte(p) == mark[m]) {
+ m++;
+ } else if (z.read_byte(p) !== 0) {
+ m = 0;
+ } else {
+ m = 4 - m;
+ }
+ p++;
+ n--;
+ }
+
+ // restore
+ z.total_in += p - z.next_in_index;
+ z.next_in_index = p;
+ z.avail_in = n;
+ z.istate.marker = m;
+
+ // return no joy or set up to restart on a new block
+ if (m != 4) {
+ return Z_DATA_ERROR;
+ }
+ r = z.total_in;
+ w = z.total_out;
+ inflateReset(z);
+ z.total_in = r;
+ z.total_out = w;
+ z.istate.mode = BLOCKS;
+ return Z_OK;
+ };
+
+ // Returns true if inflate is currently at the end of a block generated
+ // by Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
+ // implementation to provide an additional safety check. PPP uses
+ // Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored
+ // block. When decompressing, PPP checks that at the end of an input
+ // packet, inflate is waiting for these length bytes.
+ that.inflateSyncPoint = function(z) {
+ if (!z || !z.istate || !z.istate.blocks)
+ return Z_STREAM_ERROR;
+ return z.istate.blocks.sync_point();
+ };
+ }
+
+ // ZStream
+
+ function ZStream() {
+ }
+
+ ZStream.prototype = {
+ inflateInit : function(bits) {
+ var that = this;
+ that.istate = new Inflate();
+ if (!bits)
+ bits = MAX_BITS;
+ return that.istate.inflateInit(that, bits);
+ },
+
+ inflate : function(f) {
+ var that = this;
+ if (!that.istate)
+ return Z_STREAM_ERROR;
+ return that.istate.inflate(that, f);
+ },
+
+ inflateEnd : function() {
+ var that = this;
+ if (!that.istate)
+ return Z_STREAM_ERROR;
+ var ret = that.istate.inflateEnd(that);
+ that.istate = null;
+ return ret;
+ },
+
+ inflateSync : function() {
+ var that = this;
+ if (!that.istate)
+ return Z_STREAM_ERROR;
+ return that.istate.inflateSync(that);
+ },
+ inflateSetDictionary : function(dictionary, dictLength) {
+ var that = this;
+ if (!that.istate)
+ return Z_STREAM_ERROR;
+ return that.istate.inflateSetDictionary(that, dictionary, dictLength);
+ },
+ read_byte : function(start) {
+ var that = this;
+ return that.next_in.subarray(start, start + 1)[0];
+ },
+ read_buf : function(start, size) {
+ var that = this;
+ return that.next_in.subarray(start, start + size);
+ }
+ };
+
+ // Inflater
+
+ function Inflater() {
+ var that = this;
+ var z = new ZStream();
+ var bufsize = 512;
+ var flush = Z_NO_FLUSH;
+ var buf = new Uint8Array(bufsize);
+ var nomoreinput = false;
+
+ z.inflateInit();
+ z.next_out = buf;
+
+ that.append = function(data, onprogress) {
+ var err, buffers = [], lastIndex = 0, bufferIndex = 0, bufferSize = 0, array;
+ if (data.length === 0)
+ return;
+ z.next_in_index = 0;
+ z.next_in = data;
+ z.avail_in = data.length;
+ do {
+ z.next_out_index = 0;
+ z.avail_out = bufsize;
+ if ((z.avail_in === 0) && (!nomoreinput)) { // input buffer is exhausted; flag that no more input will be supplied
+ z.next_in_index = 0;
+ nomoreinput = true;
+ }
+ err = z.inflate(flush);
+ if (nomoreinput && (err === Z_BUF_ERROR)) {
+ if (z.avail_in !== 0)
+ throw new Error("inflating: bad input");
+ } else if (err !== Z_OK && err !== Z_STREAM_END)
+ throw new Error("inflating: " + z.msg);
+ if ((nomoreinput || err === Z_STREAM_END) && (z.avail_in === data.length))
+ throw new Error("inflating: bad input");
+ if (z.next_out_index)
+ if (z.next_out_index === bufsize)
+ buffers.push(new Uint8Array(buf));
+ else
+ buffers.push(new Uint8Array(buf.subarray(0, z.next_out_index)));
+ bufferSize += z.next_out_index;
+ if (onprogress && z.next_in_index > 0 && z.next_in_index != lastIndex) {
+ onprogress(z.next_in_index);
+ lastIndex = z.next_in_index;
+ }
+ } while (z.avail_in > 0 || z.avail_out === 0);
+ array = new Uint8Array(bufferSize);
+ buffers.forEach(function(chunk) {
+ array.set(chunk, bufferIndex);
+ bufferIndex += chunk.length;
+ });
+ return array;
+ };
+ that.flush = function() {
+ z.inflateEnd();
+ };
+ }
+
+ // 'zip' may not be defined in z-worker and some tests
+ var env = global.zip || global;
+ env.Inflater = env._jzlib_Inflater = Inflater;
+})(this);
diff --git a/sync/vendor/zipjs/z-worker.js b/sync/vendor/zipjs/z-worker.js
new file mode 100755
index 00000000..3e4019e8
--- /dev/null
+++ b/sync/vendor/zipjs/z-worker.js
@@ -0,0 +1,153 @@
+/* jshint worker:true */
+(function main(global) {
+ "use strict";
+
+ if (global.zWorkerInitialized)
+ throw new Error('z-worker.js should be run only once');
+ global.zWorkerInitialized = true;
+
+ addEventListener("message", function(event) {
+ var message = event.data, type = message.type, sn = message.sn;
+ var handler = handlers[type];
+ if (handler) {
+ try {
+ handler(message);
+ } catch (e) {
+ onError(type, sn, e);
+ }
+ }
+ //for debug
+ //postMessage({type: 'echo', originalType: type, sn: sn});
+ });
+
+ var handlers = {
+ importScripts: doImportScripts,
+ newTask: newTask,
+ append: processData,
+ flush: processData,
+ };
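+ // The worker is driven by messages of these types: 'importScripts' (load the codec
+ // scripts), 'newTask' (sn, codecClass, options, crcType) and 'append'/'flush' (sn plus
+ // data chunks); replies echo the type and sn and may carry data, codecTime/crcTime,
+ // crc, 'progress' notifications, or an error object (see onError below).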
+
+ // deflater/inflater tasks indexed by serial numbers
+ var tasks = {};
+
+ function doImportScripts(msg) {
+ if (msg.scripts && msg.scripts.length > 0)
+ importScripts.apply(undefined, msg.scripts);
+ postMessage({type: 'importScripts'});
+ }
+
+ function newTask(msg) {
+ var CodecClass = global[msg.codecClass];
+ var sn = msg.sn;
+ if (tasks[sn])
+ throw Error('duplicated sn');
+ tasks[sn] = {
+ codec: new CodecClass(msg.options),
+ crcInput: msg.crcType === 'input',
+ crcOutput: msg.crcType === 'output',
+ crc: new Crc32(),
+ };
+ postMessage({type: 'newTask', sn: sn});
+ }
+
+ // performance may not be supported
+ var now = global.performance ? global.performance.now.bind(global.performance) : Date.now;
+
+ function processData(msg) {
+ var sn = msg.sn, type = msg.type, input = msg.data;
+ var task = tasks[sn];
+ // allow creating codec on first append
+ if (!task && msg.codecClass) {
+ newTask(msg);
+ task = tasks[sn];
+ }
+ var isAppend = type === 'append';
+ var start = now();
+ var output;
+ if (isAppend) {
+ try {
+ output = task.codec.append(input, function onprogress(loaded) {
+ postMessage({type: 'progress', sn: sn, loaded: loaded});
+ });
+ } catch (e) {
+ delete tasks[sn];
+ throw e;
+ }
+ } else {
+ delete tasks[sn];
+ output = task.codec.flush();
+ }
+ var codecTime = now() - start;
+
+ start = now();
+ if (input && task.crcInput)
+ task.crc.append(input);
+ if (output && task.crcOutput)
+ task.crc.append(output);
+ var crcTime = now() - start;
+
+ var rmsg = {type: type, sn: sn, codecTime: codecTime, crcTime: crcTime};
+ var transferables = [];
+ if (output) {
+ rmsg.data = output;
+ transferables.push(output.buffer);
+ }
+ if (!isAppend && (task.crcInput || task.crcOutput))
+ rmsg.crc = task.crc.get();
+
+ // posting a message with transferables will fail on IE10
+ try {
+ postMessage(rmsg, transferables);
+ } catch(ex) {
+ postMessage(rmsg); // retry without transferables
+ }
+ }
+
+ function onError(type, sn, e) {
+ var msg = {
+ type: type,
+ sn: sn,
+ error: formatError(e)
+ };
+ postMessage(msg);
+ }
+
+ function formatError(e) {
+ return { message: e.message, stack: e.stack };
+ }
+
+ // Crc32 code copied from file zip.js
+ function Crc32() {
+ this.crc = -1;
+ }
+ Crc32.prototype.append = function append(data) {
+ var crc = this.crc | 0, table = this.table;
+ for (var offset = 0, len = data.length | 0; offset < len; offset++)
+ crc = (crc >>> 8) ^ table[(crc ^ data[offset]) & 0xFF];
+ this.crc = crc;
+ };
+ Crc32.prototype.get = function get() {
+ return ~this.crc;
+ };
+ Crc32.prototype.table = (function() {
+ var i, j, t, table = []; // Uint32Array is actually slower than []
+ for (i = 0; i < 256; i++) {
+ t = i;
+ for (j = 0; j < 8; j++)
+ if (t & 1)
+ t = (t >>> 1) ^ 0xEDB88320;
+ else
+ t = t >>> 1;
+ table[i] = t;
+ }
+ return table;
+ })();
+
+ // "no-op" codec
+ function NOOP() {}
+ global.NOOP = NOOP;
+ NOOP.prototype.append = function append(bytes, onprogress) {
+ return bytes;
+ };
+ NOOP.prototype.flush = function flush() {};
+})(this);
diff --git a/sync/vendor/zipjs/zip.js b/sync/vendor/zipjs/zip.js
new file mode 100755
index 00000000..4f6f67e8
--- /dev/null
+++ b/sync/vendor/zipjs/zip.js
@@ -0,0 +1,966 @@
+/*
+ Copyright (c) 2013 Gildas Lormeau. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the distribution.
+
+ 3. The names of the authors may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+ INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+ OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+(function(obj) {
+ "use strict";
+
+ var ERR_BAD_FORMAT = "File format is not recognized.";
+ var ERR_CRC = "CRC failed.";
+ var ERR_ENCRYPTED = "File contains encrypted entry.";
+ var ERR_ZIP64 = "File is using Zip64 (4gb+ file size).";
+ var ERR_READ = "Error while reading zip file.";
+ var ERR_WRITE = "Error while writing zip file.";
+ var ERR_WRITE_DATA = "Error while writing file data.";
+ var ERR_READ_DATA = "Error while reading file data.";
+ var ERR_DUPLICATED_NAME = "File already exists.";
+ var CHUNK_SIZE = 512 * 1024;
+
+ var TEXT_PLAIN = "text/plain";
+
+ var appendABViewSupported;
+ try {
+ appendABViewSupported = new Blob([ new DataView(new ArrayBuffer(0)) ]).size === 0;
+ } catch (e) {
+ }
+
+ function Crc32() {
+ this.crc = -1;
+ }
+ Crc32.prototype.append = function append(data) {
+ var crc = this.crc | 0, table = this.table;
+ for (var offset = 0, len = data.length | 0; offset < len; offset++)
+ crc = (crc >>> 8) ^ table[(crc ^ data[offset]) & 0xFF];
+ this.crc = crc;
+ };
+ Crc32.prototype.get = function get() {
+ return ~this.crc;
+ };
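+ // Standard reflected CRC-32 lookup table (polynomial 0xEDB88320), built once and shared on the prototype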
+ Crc32.prototype.table = (function() {
+ var i, j, t, table = []; // Uint32Array is actually slower than []
+ for (i = 0; i < 256; i++) {
+ t = i;
+ for (j = 0; j < 8; j++)
+ if (t & 1)
+ t = (t >>> 1) ^ 0xEDB88320;
+ else
+ t = t >>> 1;
+ table[i] = t;
+ }
+ return table;
+ })();
+
+ // "no-op" codec
+ function NOOP() {}
+ NOOP.prototype.append = function append(bytes, onprogress) {
+ return bytes;
+ };
+ NOOP.prototype.flush = function flush() {};
+
+ function blobSlice(blob, index, length) {
+ if (index < 0 || length < 0 || index + length > blob.size)
+ throw new RangeError('offset:' + index + ', length:' + length + ', size:' + blob.size);
+ if (blob.slice)
+ return blob.slice(index, index + length);
+ else if (blob.webkitSlice)
+ return blob.webkitSlice(index, index + length);
+ else if (blob.mozSlice)
+ return blob.mozSlice(index, index + length);
+ else if (blob.msSlice)
+ return blob.msSlice(index, index + length);
+ }
+
+ function getDataHelper(byteLength, bytes) {
+ var dataBuffer, dataArray;
+ dataBuffer = new ArrayBuffer(byteLength);
+ dataArray = new Uint8Array(dataBuffer);
+ if (bytes)
+ dataArray.set(bytes, 0);
+ return {
+ buffer : dataBuffer,
+ array : dataArray,
+ view : new DataView(dataBuffer)
+ };
+ }
+
+ // Readers
+ function Reader() {
+ }
+
+ function TextReader(text) {
+ var that = this, blobReader;
+
+ function init(callback, onerror) {
+ var blob = new Blob([ text ], {
+ type : TEXT_PLAIN
+ });
+ blobReader = new BlobReader(blob);
+ blobReader.init(function() {
+ that.size = blobReader.size;
+ callback();
+ }, onerror);
+ }
+
+ function readUint8Array(index, length, callback, onerror) {
+ blobReader.readUint8Array(index, length, callback, onerror);
+ }
+
+ that.size = 0;
+ that.init = init;
+ that.readUint8Array = readUint8Array;
+ }
+ TextReader.prototype = new Reader();
+ TextReader.prototype.constructor = TextReader;
+
+ function Data64URIReader(dataURI) {
+ var that = this, dataStart;
+
+ function init(callback) {
+ var dataEnd = dataURI.length;
+ while (dataURI.charAt(dataEnd - 1) == "=")
+ dataEnd--;
+ dataStart = dataURI.indexOf(",") + 1;
+ that.size = Math.floor((dataEnd - dataStart) * 0.75);
+ callback();
+ }
+
+ function readUint8Array(index, length, callback) {
+ var i, data = getDataHelper(length);
+ var start = Math.floor(index / 3) * 4;
+ var end = Math.ceil((index + length) / 3) * 4;
+ var bytes = obj.atob(dataURI.substring(start + dataStart, end + dataStart));
+ var delta = index - Math.floor(start / 4) * 3;
+ for (i = delta; i < delta + length; i++)
+ data.array[i - delta] = bytes.charCodeAt(i);
+ callback(data.array);
+ }
+
+ that.size = 0;
+ that.init = init;
+ that.readUint8Array = readUint8Array;
+ }
+ Data64URIReader.prototype = new Reader();
+ Data64URIReader.prototype.constructor = Data64URIReader;
+
+ function BlobReader(blob) {
+ var that = this;
+
+ function init(callback) {
+ that.size = blob.size;
+ callback();
+ }
+
+ function readUint8Array(index, length, callback, onerror) {
+ var reader = new FileReader();
+ reader.onload = function(e) {
+ callback(new Uint8Array(e.target.result));
+ };
+ reader.onerror = onerror;
+ try {
+ reader.readAsArrayBuffer(blobSlice(blob, index, length));
+ } catch (e) {
+ onerror(e);
+ }
+ }
+
+ that.size = 0;
+ that.init = init;
+ that.readUint8Array = readUint8Array;
+ }
+ BlobReader.prototype = new Reader();
+ BlobReader.prototype.constructor = BlobReader;
+
+ // Writers
+
+ function Writer() {
+ }
+ Writer.prototype.getData = function(callback) {
+ callback(this.data);
+ };
+
+ function TextWriter(encoding) {
+ var that = this, blob;
+
+ function init(callback) {
+ blob = new Blob([], {
+ type : TEXT_PLAIN
+ });
+ callback();
+ }
+
+ function writeUint8Array(array, callback) {
+ blob = new Blob([ blob, appendABViewSupported ? array : array.buffer ], {
+ type : TEXT_PLAIN
+ });
+ callback();
+ }
+
+ function getData(callback, onerror) {
+ var reader = new FileReader();
+ reader.onload = function(e) {
+ callback(e.target.result);
+ };
+ reader.onerror = onerror;
+ reader.readAsText(blob, encoding);
+ }
+
+ that.init = init;
+ that.writeUint8Array = writeUint8Array;
+ that.getData = getData;
+ }
+ TextWriter.prototype = new Writer();
+ TextWriter.prototype.constructor = TextWriter;
+
+ function Data64URIWriter(contentType) {
+ var that = this, data = "", pending = "";
+
+ function init(callback) {
+ data += "data:" + (contentType || "") + ";base64,";
+ callback();
+ }
+
+ function writeUint8Array(array, callback) {
+ var i, delta = pending.length, dataString = pending;
+ pending = "";
+ for (i = 0; i < (Math.floor((delta + array.length) / 3) * 3) - delta; i++)
+ dataString += String.fromCharCode(array[i]);
+ for (; i < array.length; i++)
+ pending += String.fromCharCode(array[i]);
+ if (dataString.length > 2)
+ data += obj.btoa(dataString);
+ else
+ pending = dataString;
+ callback();
+ }
+
+ function getData(callback) {
+ callback(data + obj.btoa(pending));
+ }
+
+ that.init = init;
+ that.writeUint8Array = writeUint8Array;
+ that.getData = getData;
+ }
+ Data64URIWriter.prototype = new Writer();
+ Data64URIWriter.prototype.constructor = Data64URIWriter;
+
+ function BlobWriter(contentType) {
+ var blob, that = this;
+
+ function init(callback) {
+ blob = new Blob([], {
+ type : contentType
+ });
+ callback();
+ }
+
+ function writeUint8Array(array, callback) {
+ blob = new Blob([ blob, appendABViewSupported ? array : array.buffer ], {
+ type : contentType
+ });
+ callback();
+ }
+
+ function getData(callback) {
+ callback(blob);
+ }
+
+ that.init = init;
+ that.writeUint8Array = writeUint8Array;
+ that.getData = getData;
+ }
+ BlobWriter.prototype = new Writer();
+ BlobWriter.prototype.constructor = BlobWriter;
+
+ /**
+ * inflate/deflate core functions
+ * @param worker {Worker} web worker for the task.
+ * @param initialMessage {Object} initial message to be sent to the worker. It should contain
+ * sn (a serial number for distinguishing multiple tasks sent to the worker) and codecClass.
+ * This function may add more properties before sending.
+ */
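+ // For illustration: inflate()/deflate() below pass initialMessage objects shaped like
+ // {sn: 1, codecClass: 'Inflater', crcType: 'output'} or
+ // {sn: 2, codecClass: 'Deflater', options: {level: 9}, crcType: 'input'};
+ // launchWorkerProcess then adds type ('append'/'flush') and the chunk data before posting.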
+ function launchWorkerProcess(worker, initialMessage, reader, writer, offset, size, onprogress, onend, onreaderror, onwriteerror) {
+ var chunkIndex = 0, index, outputSize, sn = initialMessage.sn, crc;
+
+ function onflush() {
+ worker.removeEventListener('message', onmessage, false);
+ onend(outputSize, crc);
+ }
+
+ function onmessage(event) {
+ var message = event.data, data = message.data, err = message.error;
+ if (err) {
+ err.toString = function () { return 'Error: ' + this.message; };
+ onreaderror(err);
+ return;
+ }
+ if (message.sn !== sn)
+ return;
+ if (typeof message.codecTime === 'number')
+ worker.codecTime += message.codecTime; // should be before onflush()
+ if (typeof message.crcTime === 'number')
+ worker.crcTime += message.crcTime;
+
+ switch (message.type) {
+ case 'append':
+ if (data) {
+ outputSize += data.length;
+ writer.writeUint8Array(data, function() {
+ step();
+ }, onwriteerror);
+ } else
+ step();
+ break;
+ case 'flush':
+ crc = message.crc;
+ if (data) {
+ outputSize += data.length;
+ writer.writeUint8Array(data, function() {
+ onflush();
+ }, onwriteerror);
+ } else
+ onflush();
+ break;
+ case 'progress':
+ if (onprogress)
+ onprogress(index + message.loaded, size);
+ break;
+ case 'importScripts': //no need to handle here
+ case 'newTask':
+ case 'echo':
+ break;
+ default:
+ console.warn('zip.js:launchWorkerProcess: unknown message: ', message);
+ }
+ }
+
+ function step() {
+ index = chunkIndex * CHUNK_SIZE;
+ // use `<=` instead of `<`, because `size` may be 0.
+ if (index <= size) {
+ reader.readUint8Array(offset + index, Math.min(CHUNK_SIZE, size - index), function(array) {
+ if (onprogress)
+ onprogress(index, size);
+ var msg = index === 0 ? initialMessage : {sn : sn};
+ msg.type = 'append';
+ msg.data = array;
+
+ // posting a message with transferables will fail on IE10
+ try {
+ worker.postMessage(msg, [array.buffer]);
+ } catch(ex) {
+ worker.postMessage(msg); // retry without transferables
+ }
+ chunkIndex++;
+ }, onreaderror);
+ } else {
+ worker.postMessage({
+ sn: sn,
+ type: 'flush'
+ });
+ }
+ }
+
+ outputSize = 0;
+ worker.addEventListener('message', onmessage, false);
+ step();
+ }
+
+ function launchProcess(process, reader, writer, offset, size, crcType, onprogress, onend, onreaderror, onwriteerror) {
+ var chunkIndex = 0, index, outputSize = 0,
+ crcInput = crcType === 'input',
+ crcOutput = crcType === 'output',
+ crc = new Crc32();
+ function step() {
+ var outputData;
+ index = chunkIndex * CHUNK_SIZE;
+ if (index < size)
+ reader.readUint8Array(offset + index, Math.min(CHUNK_SIZE, size - index), function(inputData) {
+ var outputData;
+ try {
+ outputData = process.append(inputData, function(loaded) {
+ if (onprogress)
+ onprogress(index + loaded, size);
+ });
+ } catch (e) {
+ onreaderror(e);
+ return;
+ }
+ if (outputData) {
+ outputSize += outputData.length;
+ writer.writeUint8Array(outputData, function() {
+ chunkIndex++;
+ setTimeout(step, 1);
+ }, onwriteerror);
+ if (crcOutput)
+ crc.append(outputData);
+ } else {
+ chunkIndex++;
+ setTimeout(step, 1);
+ }
+ if (crcInput)
+ crc.append(inputData);
+ if (onprogress)
+ onprogress(index, size);
+ }, onreaderror);
+ else {
+ try {
+ outputData = process.flush();
+ } catch (e) {
+ onreaderror(e);
+ return;
+ }
+ if (outputData) {
+ if (crcOutput)
+ crc.append(outputData);
+ outputSize += outputData.length;
+ writer.writeUint8Array(outputData, function() {
+ onend(outputSize, crc.get());
+ }, onwriteerror);
+ } else
+ onend(outputSize, crc.get());
+ }
+ }
+
+ step();
+ }
+
+ function inflate(worker, sn, reader, writer, offset, size, computeCrc32, onend, onprogress, onreaderror, onwriteerror) {
+ var crcType = computeCrc32 ? 'output' : 'none';
+ if (obj.zip.useWebWorkers) {
+ var initialMessage = {
+ sn: sn,
+ codecClass: 'Inflater',
+ crcType: crcType,
+ };
+ launchWorkerProcess(worker, initialMessage, reader, writer, offset, size, onprogress, onend, onreaderror, onwriteerror);
+ } else
+ launchProcess(new obj.zip.Inflater(), reader, writer, offset, size, crcType, onprogress, onend, onreaderror, onwriteerror);
+ }
+
+ function deflate(worker, sn, reader, writer, level, onend, onprogress, onreaderror, onwriteerror) {
+ var crcType = 'input';
+ if (obj.zip.useWebWorkers) {
+ var initialMessage = {
+ sn: sn,
+ options: {level: level},
+ codecClass: 'Deflater',
+ crcType: crcType,
+ };
+ launchWorkerProcess(worker, initialMessage, reader, writer, 0, reader.size, onprogress, onend, onreaderror, onwriteerror);
+ } else
+ launchProcess(new obj.zip.Deflater(), reader, writer, 0, reader.size, crcType, onprogress, onend, onreaderror, onwriteerror);
+ }
+
+ function copy(worker, sn, reader, writer, offset, size, computeCrc32, onend, onprogress, onreaderror, onwriteerror) {
+ var crcType = 'input';
+ if (obj.zip.useWebWorkers && computeCrc32) {
+ var initialMessage = {
+ sn: sn,
+ codecClass: 'NOOP',
+ crcType: crcType,
+ };
+ launchWorkerProcess(worker, initialMessage, reader, writer, offset, size, onprogress, onend, onreaderror, onwriteerror);
+ } else
+ launchProcess(new NOOP(), reader, writer, offset, size, crcType, onprogress, onend, onreaderror, onwriteerror);
+ }
+
+ // ZipReader
+
+ function decodeASCII(str) {
+ var i, out = "", charCode, extendedASCII = [ '\u00C7', '\u00FC', '\u00E9', '\u00E2', '\u00E4', '\u00E0', '\u00E5', '\u00E7', '\u00EA', '\u00EB',
+ '\u00E8', '\u00EF', '\u00EE', '\u00EC', '\u00C4', '\u00C5', '\u00C9', '\u00E6', '\u00C6', '\u00F4', '\u00F6', '\u00F2', '\u00FB', '\u00F9',
+ '\u00FF', '\u00D6', '\u00DC', '\u00F8', '\u00A3', '\u00D8', '\u00D7', '\u0192', '\u00E1', '\u00ED', '\u00F3', '\u00FA', '\u00F1', '\u00D1',
+ '\u00AA', '\u00BA', '\u00BF', '\u00AE', '\u00AC', '\u00BD', '\u00BC', '\u00A1', '\u00AB', '\u00BB', '_', '_', '_', '\u00A6', '\u00A6',
+ '\u00C1', '\u00C2', '\u00C0', '\u00A9', '\u00A6', '\u00A6', '+', '+', '\u00A2', '\u00A5', '+', '+', '-', '-', '+', '-', '+', '\u00E3',
+ '\u00C3', '+', '+', '-', '-', '\u00A6', '-', '+', '\u00A4', '\u00F0', '\u00D0', '\u00CA', '\u00CB', '\u00C8', 'i', '\u00CD', '\u00CE',
+ '\u00CF', '+', '+', '_', '_', '\u00A6', '\u00CC', '_', '\u00D3', '\u00DF', '\u00D4', '\u00D2', '\u00F5', '\u00D5', '\u00B5', '\u00FE',
+ '\u00DE', '\u00DA', '\u00DB', '\u00D9', '\u00FD', '\u00DD', '\u00AF', '\u00B4', '\u00AD', '\u00B1', '_', '\u00BE', '\u00B6', '\u00A7',
+ '\u00F7', '\u00B8', '\u00B0', '\u00A8', '\u00B7', '\u00B9', '\u00B3', '\u00B2', '_', ' ' ];
+ for (i = 0; i < str.length; i++) {
+ charCode = str.charCodeAt(i) & 0xFF;
+ if (charCode > 127)
+ out += extendedASCII[charCode - 128];
+ else
+ out += String.fromCharCode(charCode);
+ }
+ return out;
+ }
+
+ function decodeUTF8(string) {
+ return decodeURIComponent(escape(string));
+ }
+
+ function getString(bytes) {
+ var i, str = "";
+ for (i = 0; i < bytes.length; i++)
+ str += String.fromCharCode(bytes[i]);
+ return str;
+ }
+
+ function getDate(timeRaw) {
+ var date = (timeRaw & 0xffff0000) >> 16, time = timeRaw & 0x0000ffff;
+ try {
+ return new Date(1980 + ((date & 0xFE00) >> 9), ((date & 0x01E0) >> 5) - 1, date & 0x001F, (time & 0xF800) >> 11, (time & 0x07E0) >> 5,
+ (time & 0x001F) * 2, 0);
+ } catch (e) {
+ }
+ }
+
+ function readCommonHeader(entry, data, index, centralDirectory, onerror) {
+ entry.version = data.view.getUint16(index, true);
+ entry.bitFlag = data.view.getUint16(index + 2, true);
+ entry.compressionMethod = data.view.getUint16(index + 4, true);
+ entry.lastModDateRaw = data.view.getUint32(index + 6, true);
+ entry.lastModDate = getDate(entry.lastModDateRaw);
+ if ((entry.bitFlag & 0x01) === 0x01) {
+ onerror(ERR_ENCRYPTED);
+ return;
+ }
+ if (centralDirectory || (entry.bitFlag & 0x0008) != 0x0008) {
+ entry.crc32 = data.view.getUint32(index + 10, true);
+ entry.compressedSize = data.view.getUint32(index + 14, true);
+ entry.uncompressedSize = data.view.getUint32(index + 18, true);
+ }
+ if (entry.compressedSize === 0xFFFFFFFF || entry.uncompressedSize === 0xFFFFFFFF) {
+ onerror(ERR_ZIP64);
+ return;
+ }
+ entry.filenameLength = data.view.getUint16(index + 22, true);
+ entry.extraFieldLength = data.view.getUint16(index + 24, true);
+ }
+
+ function createZipReader(reader, callback, onerror) {
+ var inflateSN = 0;
+
+ function Entry() {
+ }
+
+ Entry.prototype.getData = function(writer, onend, onprogress, checkCrc32) {
+ var that = this;
+
+ function testCrc32(crc32) {
+ var dataCrc32 = getDataHelper(4);
+ dataCrc32.view.setUint32(0, crc32);
+ return that.crc32 == dataCrc32.view.getUint32(0);
+ }
+
+ function getWriterData(uncompressedSize, crc32) {
+ if (checkCrc32 && !testCrc32(crc32))
+ onerror(ERR_CRC);
+ else
+ writer.getData(function(data) {
+ onend(data);
+ });
+ }
+
+ function onreaderror(err) {
+ onerror(err || ERR_READ_DATA);
+ }
+
+ function onwriteerror(err) {
+ onerror(err || ERR_WRITE_DATA);
+ }
+
+ reader.readUint8Array(that.offset, 30, function(bytes) {
+ var data = getDataHelper(bytes.length, bytes), dataOffset;
+ if (data.view.getUint32(0) != 0x504b0304) {
+ onerror(ERR_BAD_FORMAT);
+ return;
+ }
+ readCommonHeader(that, data, 4, false, onerror);
+ dataOffset = that.offset + 30 + that.filenameLength + that.extraFieldLength;
+ writer.init(function() {
+ if (that.compressionMethod === 0)
+ copy(that._worker, inflateSN++, reader, writer, dataOffset, that.compressedSize, checkCrc32, getWriterData, onprogress, onreaderror, onwriteerror);
+ else
+ inflate(that._worker, inflateSN++, reader, writer, dataOffset, that.compressedSize, checkCrc32, getWriterData, onprogress, onreaderror, onwriteerror);
+ }, onwriteerror);
+ }, onreaderror);
+ };
+
+ function seekEOCDR(eocdrCallback) {
+ // "End of central directory record" is the last part of a zip archive, and is at least 22 bytes long.
+ // Zip file comment is the last part of EOCDR and has max length of 64KB,
+ // so we only have to search the last 64K + 22 bytes of an archive for the EOCDR signature (0x06054b50).
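+ // e.g. an archive with no trailing comment ends with exactly the 22-byte EOCDR, so the first doSeek(EOCDR_MIN, ...) pass below finds the signature immediately.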
+ var EOCDR_MIN = 22;
+ if (reader.size < EOCDR_MIN) {
+ onerror(ERR_BAD_FORMAT);
+ return;
+ }
+ var ZIP_COMMENT_MAX = 256 * 256, EOCDR_MAX = EOCDR_MIN + ZIP_COMMENT_MAX;
+
+ // In most cases, the EOCDR is EOCDR_MIN bytes long
+ doSeek(EOCDR_MIN, function() {
+ // If not found, try within EOCDR_MAX bytes
+ doSeek(Math.min(EOCDR_MAX, reader.size), function() {
+ onerror(ERR_BAD_FORMAT);
+ });
+ });
+
+ // seek last length bytes of file for EOCDR
+ function doSeek(length, eocdrNotFoundCallback) {
+ reader.readUint8Array(reader.size - length, length, function(bytes) {
+ for (var i = bytes.length - EOCDR_MIN; i >= 0; i--) {
+ if (bytes[i] === 0x50 && bytes[i + 1] === 0x4b && bytes[i + 2] === 0x05 && bytes[i + 3] === 0x06) {
+ eocdrCallback(new DataView(bytes.buffer, i, EOCDR_MIN));
+ return;
+ }
+ }
+ eocdrNotFoundCallback();
+ }, function() {
+ onerror(ERR_READ);
+ });
+ }
+ }
+
+ var zipReader = {
+ getEntries : function(callback) {
+ var worker = this._worker;
+ // look for End of central directory record
+ seekEOCDR(function(dataView) {
+ var datalength, fileslength;
+ datalength = dataView.getUint32(16, true);
+ fileslength = dataView.getUint16(8, true);
+ if (datalength < 0 || datalength >= reader.size) {
+ onerror(ERR_BAD_FORMAT);
+ return;
+ }
+ reader.readUint8Array(datalength, reader.size - datalength, function(bytes) {
+ var i, index = 0, entries = [], entry, filename, comment, data = getDataHelper(bytes.length, bytes);
+ for (i = 0; i < fileslength; i++) {
+ entry = new Entry();
+ entry._worker = worker;
+ if (data.view.getUint32(index) != 0x504b0102) {
+ onerror(ERR_BAD_FORMAT);
+ return;
+ }
+ readCommonHeader(entry, data, index + 6, true, onerror);
+ entry.commentLength = data.view.getUint16(index + 32, true);
+ entry.directory = ((data.view.getUint8(index + 38) & 0x10) == 0x10);
+ entry.offset = data.view.getUint32(index + 42, true);
+ filename = getString(data.array.subarray(index + 46, index + 46 + entry.filenameLength));
+ entry.filename = ((entry.bitFlag & 0x0800) === 0x0800) ? decodeUTF8(filename) : decodeASCII(filename);
+ if (!entry.directory && entry.filename.charAt(entry.filename.length - 1) == "/")
+ entry.directory = true;
+ comment = getString(data.array.subarray(index + 46 + entry.filenameLength + entry.extraFieldLength, index + 46
+ + entry.filenameLength + entry.extraFieldLength + entry.commentLength));
+ entry.comment = ((entry.bitFlag & 0x0800) === 0x0800) ? decodeUTF8(comment) : decodeASCII(comment);
+ entries.push(entry);
+ index += 46 + entry.filenameLength + entry.extraFieldLength + entry.commentLength;
+ }
+ callback(entries);
+ }, function() {
+ onerror(ERR_READ);
+ });
+ });
+ },
+ close : function(callback) {
+ if (this._worker) {
+ this._worker.terminate();
+ this._worker = null;
+ }
+ if (callback)
+ callback();
+ },
+ _worker: null
+ };
+
+ if (!obj.zip.useWebWorkers)
+ callback(zipReader);
+ else {
+ createWorker('inflater',
+ function(worker) {
+ zipReader._worker = worker;
+ callback(zipReader);
+ },
+ function(err) {
+ onerror(err);
+ }
+ );
+ }
+ }
+
+ // ZipWriter
+
+ function encodeUTF8(string) {
+ return unescape(encodeURIComponent(string));
+ }
+
+ function getBytes(str) {
+ var i, array = [];
+ for (i = 0; i < str.length; i++)
+ array.push(str.charCodeAt(i));
+ return array;
+ }
+
+ function createZipWriter(writer, callback, onerror, dontDeflate) {
+ var files = {}, filenames = [], datalength = 0;
+ var deflateSN = 0;
+
+ function onwriteerror(err) {
+ onerror(err || ERR_WRITE);
+ }
+
+ function onreaderror(err) {
+ onerror(err || ERR_READ_DATA);
+ }
+
+ var zipWriter = {
+ add : function(name, reader, onend, onprogress, options) {
+ var header, filename, date;
+ var worker = this._worker;
+
+ function writeHeader(callback) {
+ var data;
+ date = options.lastModDate || new Date();
+ header = getDataHelper(26);
+ files[name] = {
+ headerArray : header.array,
+ directory : options.directory,
+ filename : filename,
+ offset : datalength,
+ comment : getBytes(encodeUTF8(options.comment || ""))
+ };
+ header.view.setUint32(0, 0x14000808);
+ if (options.version)
+ header.view.setUint8(0, options.version);
+ if (!dontDeflate && options.level !== 0 && !options.directory)
+ header.view.setUint16(4, 0x0800);
+ header.view.setUint16(6, (((date.getHours() << 6) | date.getMinutes()) << 5) | date.getSeconds() / 2, true);
+ header.view.setUint16(8, ((((date.getFullYear() - 1980) << 4) | (date.getMonth() + 1)) << 5) | date.getDate(), true);
+ header.view.setUint16(22, filename.length, true);
+ data = getDataHelper(30 + filename.length);
+ data.view.setUint32(0, 0x504b0304);
+ data.array.set(header.array, 4);
+ data.array.set(filename, 30);
+ datalength += data.array.length;
+ writer.writeUint8Array(data.array, callback, onwriteerror);
+ }
+
+ function writeFooter(compressedLength, crc32) {
+ var footer = getDataHelper(16);
+ datalength += compressedLength || 0;
+ footer.view.setUint32(0, 0x504b0708);
+ if (typeof crc32 != "undefined") {
+ header.view.setUint32(10, crc32, true);
+ footer.view.setUint32(4, crc32, true);
+ }
+ if (reader) {
+ footer.view.setUint32(8, compressedLength, true);
+ header.view.setUint32(14, compressedLength, true);
+ footer.view.setUint32(12, reader.size, true);
+ header.view.setUint32(18, reader.size, true);
+ }
+ writer.writeUint8Array(footer.array, function() {
+ datalength += 16;
+ onend();
+ }, onwriteerror);
+ }
+
+ function writeFile() {
+ options = options || {};
+ name = name.trim();
+ if (options.directory && name.charAt(name.length - 1) != "/")
+ name += "/";
+ if (files.hasOwnProperty(name)) {
+ onerror(ERR_DUPLICATED_NAME);
+ return;
+ }
+ filename = getBytes(encodeUTF8(name));
+ filenames.push(name);
+ writeHeader(function() {
+ if (reader)
+ if (dontDeflate || options.level === 0)
+ copy(worker, deflateSN++, reader, writer, 0, reader.size, true, writeFooter, onprogress, onreaderror, onwriteerror);
+ else
+ deflate(worker, deflateSN++, reader, writer, options.level, writeFooter, onprogress, onreaderror, onwriteerror);
+ else
+ writeFooter();
+ }, onwriteerror);
+ }
+
+ if (reader)
+ reader.init(writeFile, onreaderror);
+ else
+ writeFile();
+ },
+ close : function(callback) {
+ if (this._worker) {
+ this._worker.terminate();
+ this._worker = null;
+ }
+
+ var data, length = 0, index = 0, indexFilename, file;
+ for (indexFilename = 0; indexFilename < filenames.length; indexFilename++) {
+ file = files[filenames[indexFilename]];
+ length += 46 + file.filename.length + file.comment.length;
+ }
+ data = getDataHelper(length + 22);
+ for (indexFilename = 0; indexFilename < filenames.length; indexFilename++) {
+ file = files[filenames[indexFilename]];
+ data.view.setUint32(index, 0x504b0102);
+ data.view.setUint16(index + 4, 0x1400);
+ data.array.set(file.headerArray, index + 6);
+ data.view.setUint16(index + 32, file.comment.length, true);
+ if (file.directory)
+ data.view.setUint8(index + 38, 0x10);
+ data.view.setUint32(index + 42, file.offset, true);
+ data.array.set(file.filename, index + 46);
+ data.array.set(file.comment, index + 46 + file.filename.length);
+ index += 46 + file.filename.length + file.comment.length;
+ }
+ data.view.setUint32(index, 0x504b0506);
+ data.view.setUint16(index + 8, filenames.length, true);
+ data.view.setUint16(index + 10, filenames.length, true);
+ data.view.setUint32(index + 12, length, true);
+ data.view.setUint32(index + 16, datalength, true);
+ writer.writeUint8Array(data.array, function() {
+ writer.getData(callback);
+ }, onwriteerror);
+ },
+ _worker: null
+ };
+
+ if (!obj.zip.useWebWorkers)
+ callback(zipWriter);
+ else {
+ createWorker('deflater',
+ function(worker) {
+ zipWriter._worker = worker;
+ callback(zipWriter);
+ },
+ function(err) {
+ onerror(err);
+ }
+ );
+ }
+ }
+
+ function resolveURLs(urls) {
+ var a = document.createElement('a');
+ return urls.map(function(url) {
+ a.href = url;
+ return a.href;
+ });
+ }
+
+ var DEFAULT_WORKER_SCRIPTS = {
+ deflater: ['z-worker.js', 'deflate.js'],
+ inflater: ['z-worker.js', 'inflate.js']
+ };
+ function createWorker(type, callback, onerror) {
+ if (obj.zip.workerScripts !== null && obj.zip.workerScriptsPath !== null) {
+ onerror(new Error('Either zip.workerScripts or zip.workerScriptsPath may be set, not both.'));
+ return;
+ }
+ var scripts;
+ if (obj.zip.workerScripts) {
+ scripts = obj.zip.workerScripts[type];
+ if (!Array.isArray(scripts)) {
+ onerror(new Error('zip.workerScripts.' + type + ' is not an array!'));
+ return;
+ }
+ scripts = resolveURLs(scripts);
+ } else {
+ scripts = DEFAULT_WORKER_SCRIPTS[type].slice(0);
+ scripts[0] = (obj.zip.workerScriptsPath || '') + scripts[0];
+ }
+ var worker = new Worker(scripts[0]);
+ // record total consumed time by inflater/deflater/crc32 in this worker
+ worker.codecTime = worker.crcTime = 0;
+ worker.postMessage({ type: 'importScripts', scripts: scripts.slice(1) });
+ worker.addEventListener('message', onmessage);
+ function onmessage(ev) {
+ var msg = ev.data;
+ if (msg.error) {
+ worker.terminate(); // should come before onerror(), because onerror() may throw.
+ onerror(msg.error);
+ return;
+ }
+ if (msg.type === 'importScripts') {
+ worker.removeEventListener('message', onmessage);
+ worker.removeEventListener('error', errorHandler);
+ callback(worker);
+ }
+ }
+ // catch entry script loading error and other unhandled errors
+ worker.addEventListener('error', errorHandler);
+ function errorHandler(err) {
+ worker.terminate();
+ onerror(err);
+ }
+ }
+
+ function onerror_default(error) {
+ console.error(error);
+ }
+ obj.zip = {
+ Reader : Reader,
+ Writer : Writer,
+ BlobReader : BlobReader,
+ Data64URIReader : Data64URIReader,
+ TextReader : TextReader,
+ BlobWriter : BlobWriter,
+ Data64URIWriter : Data64URIWriter,
+ TextWriter : TextWriter,
+ createReader : function(reader, callback, onerror) {
+ onerror = onerror || onerror_default;
+
+ reader.init(function() {
+ createZipReader(reader, callback, onerror);
+ }, onerror);
+ },
+ createWriter : function(writer, callback, onerror, dontDeflate) {
+ onerror = onerror || onerror_default;
+ dontDeflate = !!dontDeflate;
+
+ writer.init(function() {
+ createZipWriter(writer, callback, onerror, dontDeflate);
+ }, onerror);
+ },
+ useWebWorkers : true,
+ /**
+ * Directory containing the default worker scripts (z-worker.js, deflate.js, and inflate.js), relative to current base url.
+ * E.g.: zip.workerScriptsPath = './';
+ */
+ workerScriptsPath : null,
+ /**
+ * Advanced option to control which scripts are loaded in the Web worker. If this option is specified, then workerScriptsPath must not be set.
+ * workerScripts.deflater/workerScripts.inflater should be arrays of urls to scripts for deflater/inflater, respectively.
+ * Scripts in the array are executed in order, and the first one should be z-worker.js, which is used to start the worker.
+ * All urls are relative to current base url.
+ * E.g.:
+ * zip.workerScripts = {
+ * deflater: ['z-worker.js', 'deflate.js'],
+ * inflater: ['z-worker.js', 'inflate.js']
+ * };
+ */
+ workerScripts : null,
+ };
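+ // A minimal consumer sketch (illustrative only, assuming `blob` holds a zip archive):
+ //   zip.createReader(new zip.BlobReader(blob), function(reader) {
+ //     reader.getEntries(function(entries) {
+ //       entries[0].getData(new zip.TextWriter(), function(text) {
+ //         reader.close();            // `text` now holds the first entry's content
+ //       });
+ //     });
+ //   }, console.error);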
+
+})(this);