'use strict';

var utils = require('./utils');

var support = require('./support');

var nodejsUtils = require('./nodejsUtils');

var GenericWorker = require('./stream/GenericWorker');
/**
 * The following functions come from pako, from pako/lib/utils/strings
 * released under the MIT license, see pako github.com/nodeca/pako/
 */
// Table with utf8 lengths (calculated by the first byte of a sequence)
// Note that 5 & 6-byte values and some 4-byte values can not be represented in JS,
// because the max possible codepoint is 0x10ffff


var _utf8len = new Array(256);

for (var i = 0; i < 256; i++) {
    _utf8len[i] = i >= 252 ? 6 : i >= 248 ? 5 : i >= 240 ? 4 : i >= 224 ? 3 : i >= 192 ? 2 : 1;
}

_utf8len[254] = _utf8len[254] = 1; // Invalid sequence start
// convert string to array (typed, when possible)

var string2buf = function string2buf(str) {
    var buf,
        c,
        c2,
        m_pos,
        i,
        str_len = str.length,
        buf_len = 0; // count binary size

    for (m_pos = 0; m_pos < str_len; m_pos++) {
        c = str.charCodeAt(m_pos);

        if ((c & 0xfc00) === 0xd800 && m_pos + 1 < str_len) {
            c2 = str.charCodeAt(m_pos + 1);

            if ((c2 & 0xfc00) === 0xdc00) {
                c = 0x10000 + (c - 0xd800 << 10) + (c2 - 0xdc00);
                m_pos++;
            }
        }

        buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 3 : 4;
    } // allocate buffer


    if (support.uint8array) {
        buf = new Uint8Array(buf_len);
    } else {
        buf = new Array(buf_len);
    } // convert


    for (i = 0, m_pos = 0; i < buf_len; m_pos++) {
        c = str.charCodeAt(m_pos);

        if ((c & 0xfc00) === 0xd800 && m_pos + 1 < str_len) {
            c2 = str.charCodeAt(m_pos + 1);

            if ((c2 & 0xfc00) === 0xdc00) {
                c = 0x10000 + (c - 0xd800 << 10) + (c2 - 0xdc00);
                m_pos++;
            }
        }

        if (c < 0x80) {
            /* one byte */
            buf[i++] = c;
        } else if (c < 0x800) {
            /* two bytes */
            buf[i++] = 0xC0 | c >>> 6;
            buf[i++] = 0x80 | c & 0x3f;
        } else if (c < 0x10000) {
            /* three bytes */
            buf[i++] = 0xE0 | c >>> 12;
            buf[i++] = 0x80 | c >>> 6 & 0x3f;
            buf[i++] = 0x80 | c & 0x3f;
        } else {
            /* four bytes */
            buf[i++] = 0xf0 | c >>> 18;
            buf[i++] = 0x80 | c >>> 12 & 0x3f;
            buf[i++] = 0x80 | c >>> 6 & 0x3f;
            buf[i++] = 0x80 | c & 0x3f;
        }
    }

    return buf;
}; // Calculate the max possible position in the utf8 buffer
// that will not break a sequence. If that's not possible
// (very small limits), return max size as is.
//
// buf[] - utf8 bytes array
// max - length limit (mandatory);


var utf8border = function utf8border(buf, max) {
    var pos;
    max = max || buf.length;

    if (max > buf.length) {
        max = buf.length;
    } // go back from the last position until the start of a sequence is found


    pos = max - 1;

    while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) {
        pos--;
    } // Very small and broken sequence -
    // return max, because we should return something anyway.


    if (pos < 0) {
        return max;
    } // If we reached the start of the buffer, the buffer is too small -
    // return max too.


    if (pos === 0) {
        return max;
    }

    return pos + _utf8len[buf[pos]] > max ? pos : max;
}; // convert array to string


var buf2string = function buf2string(buf) {
    var str, i, out, c, c_len;
    var len = buf.length; // Reserve max possible length (2 words per char)
    // NB: for unknown reasons, Array is significantly faster for
    // String.fromCharCode.apply than Uint16Array.

    var utf16buf = new Array(len * 2);

    for (out = 0, i = 0; i < len;) {
        c = buf[i++]; // quick process ascii

        if (c < 0x80) {
            utf16buf[out++] = c;
            continue;
        }

        c_len = _utf8len[c]; // skip 5 & 6 byte codes

        if (c_len > 4) {
            utf16buf[out++] = 0xfffd;
            i += c_len - 1;
            continue;
        } // apply mask on first byte


        c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07; // join the rest

        while (c_len > 1 && i < len) {
            c = c << 6 | buf[i++] & 0x3f;
            c_len--;
        } // terminated by end of string?


        if (c_len > 1) {
            utf16buf[out++] = 0xfffd;
            continue;
        }

        if (c < 0x10000) {
            utf16buf[out++] = c;
        } else {
            c -= 0x10000;
            utf16buf[out++] = 0xd800 | c >> 10 & 0x3ff;
            utf16buf[out++] = 0xdc00 | c & 0x3ff;
        }
    } // shrinkBuf(utf16buf, out)


    if (utf16buf.length !== out) {
        if (utf16buf.subarray) {
            utf16buf = utf16buf.subarray(0, out);
        } else {
            utf16buf.length = out;
        }
    } // return String.fromCharCode.apply(null, utf16buf);


    return utils.applyFromCharCode(utf16buf);
}; // That's all for the pako functions.

/**
 * Transform a javascript string into an array (typed if possible) of bytes,
 * UTF-8 encoded.
 * @param {String} str the string to encode
 * @return {Array|Uint8Array|Buffer} the UTF-8 encoded string.
 */


exports.utf8encode = function utf8encode(str) {
    if (support.nodebuffer) {
        return nodejsUtils.newBufferFrom(str, "utf-8");
    }

    return string2buf(str);
};
/**
 * Transform a bytes array (or a representation) representing an UTF-8 encoded
 * string into a javascript string.
 * @param {Array|Uint8Array|Buffer} buf the data to decode
 * @return {String} the decoded string.
 */


exports.utf8decode = function utf8decode(buf) {
    if (support.nodebuffer) {
        return utils.transformTo("nodebuffer", buf).toString("utf-8");
    }

    buf = utils.transformTo(support.uint8array ? "uint8array" : "array", buf);
    return buf2string(buf);
};
/**
 * A worker to decode utf8 encoded binary chunks into string chunks.
 * @constructor
 */


function Utf8DecodeWorker() {
    GenericWorker.call(this, "utf-8 decode"); // the last bytes if a chunk didn't end with a complete codepoint.

    this.leftOver = null;
}

utils.inherits(Utf8DecodeWorker, GenericWorker);
/**
 * @see GenericWorker.processChunk
 */

Utf8DecodeWorker.prototype.processChunk = function (chunk) {
    var data = utils.transformTo(support.uint8array ? "uint8array" : "array", chunk.data); // 1st step, re-use what's left of the previous chunk

    if (this.leftOver && this.leftOver.length) {
        if (support.uint8array) {
            var previousData = data;
            data = new Uint8Array(previousData.length + this.leftOver.length);
            data.set(this.leftOver, 0);
            data.set(previousData, this.leftOver.length);
        } else {
            data = this.leftOver.concat(data);
        }

        this.leftOver = null;
    }

    var nextBoundary = utf8border(data);
    var usableData = data;

    if (nextBoundary !== data.length) {
        if (support.uint8array) {
            usableData = data.subarray(0, nextBoundary);
            this.leftOver = data.subarray(nextBoundary, data.length);
        } else {
            usableData = data.slice(0, nextBoundary);
            this.leftOver = data.slice(nextBoundary, data.length);
        }
    }

    this.push({
        data: exports.utf8decode(usableData),
        meta: chunk.meta
    });
};
/**
 * @see GenericWorker.flush
 */


Utf8DecodeWorker.prototype.flush = function () {
    if (this.leftOver && this.leftOver.length) {
        this.push({
            data: exports.utf8decode(this.leftOver),
            meta: {}
        });
        this.leftOver = null;
    }
};

exports.Utf8DecodeWorker = Utf8DecodeWorker;
/**
 * A worker to encode string chunks into utf8 encoded binary chunks.
 * @constructor
 */

function Utf8EncodeWorker() {
    GenericWorker.call(this, "utf-8 encode");
}

utils.inherits(Utf8EncodeWorker, GenericWorker);
/**
 * @see GenericWorker.processChunk
 */

Utf8EncodeWorker.prototype.processChunk = function (chunk) {
    this.push({
        data: exports.utf8encode(chunk.data),
        meta: chunk.meta
    });
};

exports.Utf8EncodeWorker = Utf8EncodeWorker;
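
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original module): a minimal
// round trip through the exports above. It only runs when this file is executed
// directly with node, and it assumes the relative requires at the top of the
// file ('./utils', './support', etc.) resolve, i.e. it is run from inside the
// library's source tree.
if (require.main === module) {
    var sample = "héllo wörld";

    // String -> UTF-8 bytes: a Buffer under node, a Uint8Array or Array otherwise.
    var encoded = exports.utf8encode(sample);

    // UTF-8 bytes -> String: the round trip restores the original text.
    console.log(exports.utf8decode(encoded) === sample); // true

    // Utf8EncodeWorker / Utf8DecodeWorker wrap the same two functions for the
    // streaming pipeline: each processChunk() call pushes the converted chunk
    // downstream with the chunk's meta object, and Utf8DecodeWorker keeps any
    // trailing incomplete codepoint in this.leftOver until the next chunk.
}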