.dtype);if(2!==s.rank)throw Error(`Indices should be Tensor2D but received shape ${s.shape}`);if(1!==i.rank)throw Error(`Values should be Tensor1D but received shape ${i.shape}`);if(1!==o.rank)throw Error(`Dense shape should be Tensor1D but received shape ${o.shape}`);if(0!==l.rank)throw Error(`Default value should be a scalar but received shape ${l.shape}`);let u=ay.runKernel(rU,{indices:s,values:i,denseShape:o,defaultValue:l});return{outputIndices:u[0],outputValues:u[1],emptyRowIndicator:u[2],reverseIndexMap:u[3]}}}),on=op({sparseReshape_:function(t,r,a){let n=tensor_util_env_convertToTensor(t,"inputIndices","sparseReshape","int32"),s=tensor_util_env_convertToTensor(r,"inputShape","sparseReshape","int32"),i=tensor_util_env_convertToTensor(a,"newShape","sparseReshape","int32");if(2!==n.rank)throw Error(`Input indices should be Tensor2D but received shape ${n.shape}`);if(1!==s.rank)throw Error(`Input shape should be Tensor1D but received shape ${s.shape}`);if(1!==i.rank)throw Error(`New shape should be Tensor1D but received shape ${i.shape}`);let o=ay.runKernel(rG,{inputIndices:n,inputShape:s,newShape:i});return{outputIndices:o[0],outputShape:o[1]}}}),os=op({sparseSegmentMean_:function(t,r,a){let n=tensor_util_env_convertToTensor(t,"data","sparseSegmentMean"),s=tensor_util_env_convertToTensor(r,"indices","sparseSegmentMean","int32"),i=tensor_util_env_convertToTensor(a,"segmentIds","sparseSegmentMean","int32");if(n.rank<1)throw Error("Data should be at least 1 dimensional but received scalar");if(1!==s.rank)throw Error(`Indices should be Tensor1D but received shape ${s.shape}`);if(1!==i.rank)throw Error(`Segment ids should be Tensor1D but received shape ${i.shape}`);return ay.runKernel(rj,{data:n,indices:s,segmentIds:i})}}),oi=op({sparseSegmentSum_:function(t,r,a){let 
n=tensor_util_env_convertToTensor(t,"data","sparseSegmentSum"),s=tensor_util_env_convertToTensor(r,"indices","sparseSegmentSum","int32"),i=tensor_util_env_convertToTensor(a,"segmentIds","sparseSegmentSum","int32");if(n.rank<1)throw Error("Data should be at least 1 dimensional but received scalar");if(1!==s.rank)throw Error(`Indices should be Tensor1D but received shape ${s.shape}`);if(1!==i.rank)throw Error(`Segment ids should be Tensor1D but received shape ${i.shape}`);return ay.runKernel(rK,{data:n,indices:s,segmentIds:i})}}),oo=op({stringNGrams_:function(t,r,a,n,s,i,o,l){let u=tensor_util_env_convertToTensor(t,"data","stringNGrams","string");if("string"!==u.dtype)throw Error("Data must be of datatype string");if(1!==u.shape.length)throw Error(`Data must be a vector, saw: ${u.shape}`);let p=tensor_util_env_convertToTensor(r,"dataSplits","stringNGrams");if("int32"!==p.dtype)throw Error("Data splits must be of datatype int32");let m=ay.runKernel(rJ,{data:u,dataSplits:p},{separator:a,nGramWidths:n,leftPad:s,rightPad:i,padWidth:o,preserveShortSequences:l});return{nGrams:m[0],nGramsSplits:m[1]}}}),ol=op({stringSplit_:function(t,r,a=!0){let n=tensor_util_env_convertToTensor(t,"input","stringSplit","string"),s=tensor_util_env_convertToTensor(r,"delimiter","stringSplit","string");if(1!==n.rank)throw Error(`Input should be Tensor1D but received shape ${n.shape}`);if(0!==s.rank)throw Error(`Delimiter should be a scalar but received shape ${s.shape}`);let i=ay.runKernel(rQ,{input:n,delimiter:s},{skipEmpty:a});return{indices:i[0],values:i[1],shape:i[2]}}}),ou=op({stringToHashBucketFast_:function(t,r){let a=tensor_util_env_convertToTensor(t,"input","stringToHashBucketFast","string");if(r<=0)throw Error("Number of buckets must be at least 1");return ay.runKernel(r0,{input:a},{numBuckets:r})}}),oh=op({staticRegexReplace_:function(t,r,a,n=!0){let s=tensor_util_env_convertToTensor(t,"input","staticRegexReplace","string");return 
ay.runKernel(rY,{x:s},{pattern:r,rewrite:a,replaceGlobal:n})}}),oc={fft:il,ifft:iu,rfft:ic,irfft:ip},od={hammingWindow:iL,hannWindow:iz,frame:iV,stft:iB},om={flipLeftRight:iU,grayscaleToRGB:iG,resizeNearestNeighbor:iZ,resizeBilinear:iY,rgbToGrayscale:ij,rotateWithOffset:iK,cropAndResize:iW,nonMaxSuppression:iH,nonMaxSuppressionAsync:nonMaxSuppressionAsync,nonMaxSuppressionWithScore:iq,nonMaxSuppressionWithScoreAsync:nonMaxSuppressionWithScoreAsync,nonMaxSuppressionPadded:iX,nonMaxSuppressionPaddedAsync:nonMaxSuppressionPaddedAsync,threshold:iJ,transform:iQ},of={bandPart:i0,gramSchmidt:i1,qr:i2},og={absoluteDifference:i4,computeWeightedLoss:i3,cosineDistance:i6,hingeLoss:i5,huberLoss:i8,logLoss:i7,meanSquaredError:i9,sigmoidCrossEntropy:oe,softmaxCrossEntropy:ot},oy={sparseFillEmptyRows:or,sparseReshape:on,sparseSegmentMean:os,sparseSegmentSum:oi},ox={stringNGrams:oo,stringSplit:ol,stringToHashBucketFast:ou,staticRegexReplace:oh};let OptimizerConstructors=class OptimizerConstructors{static sgd(t){return new SGDOptimizer(t)}static momentum(t,r,a=!1){return new MomentumOptimizer(t,r,a)}static rmsprop(t,r=.9,a=0,n=null,s=!1){return new RMSPropOptimizer(t,r,a,n,s)}static adam(t=.001,r=.9,a=.999,n=null){return new AdamOptimizer(t,r,a,n)}static adadelta(t=.001,r=.95,a=null){return new AdadeltaOptimizer(t,r,a)}static adamax(t=.002,r=.9,a=.999,n=null,s=0){return new AdamaxOptimizer(t,r,a,n,s)}static adagrad(t,r=.1){return new AdagradOptimizer(t,r)}};let ov="u">typeof requestAnimationFrame?requestAnimationFrame:"u">typeof setImmediate?setImmediate:t=>t();function nextFrame(){return new Promise(t=>ov(()=>t()))}function assertParamsConsistent(t,r){let a=t[0].length;t.forEach((t,r)=>{assert(t.length===a,()=>`Error in concat${a}D: rank of tensors[${r}] must be the same as the rank of the rest (${a})`)}),assert(r>=0&&r`Error in concat${a}D: axis must be between 0 and ${a-1}.`);let n=t[0];t.forEach((t,s)=>{for(let i=0;i`Error in concat${a}D: Shape of tensors[${s}] (${t}) does not 
match the shape of the rest (${n}) along the non-concatenated axis ${s}.`)})}function concat_util_computeOutShape(t,r){let a=t[0].slice();for(let n=1;n=0)if(l>=0){if(l!==i)throw Error(`rt input.shape and shape=${r} are incompatible: rt input.shape[${s+t}] = ${i} but shape[${s+t}] = ${l}`)}else n[o]=i}return n}function getRowPartitionTypesHelper(t){let r={FIRST_DIM_SIZE:et.FIRST_DIM_SIZE,VALUE_ROWIDS:et.VALUE_ROWIDS,ROW_LENGTHS:et.ROW_LENGTHS,ROW_SPLITS:et.ROW_SPLITS,ROW_LIMITS:et.ROW_LIMITS,ROW_STARTS:et.ROW_STARTS},a=[];for(let n of t)if(n in r)a.push(r[n]);else break;return a}function getRaggedRank(t){return 0===t.length?0:t[0]===et.FIRST_DIM_SIZE?t.length-1:t.length}function validateDefaultValueShape(t,r){if(null==t||null==r)return;let a=t.length,n=r.length;if(a>=n)throw Error(`defaultValue.shape=${t} and ragged tensor flatValues.shape=${r}, are incompatible: defaultValue.rank = ${a} must be less than ragged tensor input flatValues.rank = ${n})`);for(let s=0;s=0&&n>=0&&1!==a&&a!==n)throw Error(`defaultValue.shape=${t}, and ragged tensor input flatValues.shape=${r} are incompatible: defaultValue.shape[${s-t.length}] = ${a} but ragged tensor input.flatValues.shape[${s-t.length}] = ${n}`)}}(A=et||(et={}))[A.FIRST_DIM_SIZE=0]="FIRST_DIM_SIZE",A[A.VALUE_ROWIDS=1]="VALUE_ROWIDS",A[A.ROW_LENGTHS=2]="ROW_LENGTHS",A[A.ROW_SPLITS=3]="ROW_SPLITS",A[A.ROW_LIMITS=4]="ROW_LIMITS",A[A.ROW_STARTS=5]="ROW_STARTS";let o_=30;function computeOptimalWindowSize(t){return t<=o_?t:nearestDivisor(t,Math.floor(Math.sqrt(t)))}function getImageCenter(t,r,a){return[a*("number"==typeof t?t:t[0]),r*("number"==typeof t?t:t[1])]}function getReshaped(t,r,a,n=!0){let s=[];if(n)(s=s.concat(r.slice(0))).push(t[0]/a),s=s.concat(t.slice(1));else{s=s.concat(t[0]);let a=r.length;for(let n=0;n=2*r+1||n%2==1?s.push(n):a.push(n);n.push(...a),n.push(0),n.push(...s)}return n}function getReshapedPermuted(t,r,a,n=!0){let s=[];n?s.push(t[0]/a):s.push(t[0]*a);for(let a=1;aa)throw Error(`index innermost 
dimension length must be <= tensor rank; saw: ${r.shape[n-1]} vs. ${a}`);if(0===sizeFromShape(t.shape))throw Error(`Requested more than 0 entries, but input is empty. Input shape: ${t.shape}.`);let s=r.shape,i=s[s.length-1],o=1;for(let t=0;tt/p),1].slice(0,i);return[u,o,p,m]}let oT=1.7580993408473768,ok=1.0507009873554805,oS=.3275911,ow=.254829592,oI=-.284496736,oN=1.421413741,oC=-1.453152027,oE=1.061405429;function mergeRealAndImagArrays(t,r){if(t.length!==r.length)throw Error(`Cannot merge real and imag arrays of different lengths. real:${t.length}, imag: ${r.length}.`);let a=new Float32Array(2*t.length);for(let n=0;n/g;function decodeEinsumEquation(t,r){let a=((t=t.replace(/\s/g,"")).length-t.replace(oA,"").length)/2;if(a<1)throw Error("Equations without an arrow are not supported.");if(a>1)throw Error('Equation must contain exactly one arrow ("->").');let[n,s]=t.split("->");assert(-1===n.indexOf("..."),()=>'The ellipsis notation ("...") is not supported yet.');let i=n.split(","),o=i.length;if(r!==o)throw Error(`Expected ${o} input tensors, received ${r}`);if(o>2)throw Error("Support for more than 2 input tensors is not implemented yet.");let l=[];for(let t=0;t-1!==t.indexOf(r)))throw Error(`Output subscripts contain the label ${r} not present in the input subscripts.`);-1===l.indexOf(r)&&l.push(r)}for(let t=0;t-1!==t),expandDims:n}}function checkEinsumDimSizes(t,r,a){let n=Array(t);for(let t=0;t`Expected dimension ${n[r[t][a]]} at axis ${a} of input shaped ${JSON.stringify(s)}, but got dimension ${s[a]}`)}}function getEinsumComputePath(t,r){let a=[],n=0;0===t.length&&t.push(-1),n=t.length+1;for(let t=0;tt===r)}function findTermsWithDim(t,r){let a=[];for(let n=0;n"Number of splits must evenly divide the axis."),n=Array(r).fill(t.shape[a]/r);else{assert(1>=r.reduce((t,r)=>(-1===r&&(t+=1),t),0),()=>"There should be only one negative value in split array.");let s=r.indexOf(-1);if(-1!==s){let 
n=r.reduce((t,r)=>r>0?t+r:t);r[s]=t.shape[a]-n}assert(t.shape[a]===r.reduce((t,r)=>t+r),()=>"The sum of sizes must match the size of the axis dimension."),n=r}return n}function getSparseFillEmptyRowsIndicesDenseShapeMismatch(t){return`Received SparseTensor with denseShape[0] = 0 but indices.shape[0] = ${t}`}function getSparseFillEmptyRowsNegativeIndexErrorMessage(t,r){return`indices(${t}, 0) is invalid: ${r} < 0`}function getSparseFillEmptyRowsOutOfRangeIndexErrorMessage(t,r,a){return`indices(${t}, 0) is invalid: ${r} >= ${a}`}function getSparseReshapeMultipleNegativeOneOutputDimErrorMessage(t,r){return`only one output dimension may be -1, not both ${t} and ${r}`}function getSparseReshapeNegativeOutputDimErrorMessage(t,r){return`size ${t} must be non-negative, not ${r}`}function getSparseReshapeEmptyTensorZeroOutputDimErrorMessage(){return"reshape cannot infer the missing input size for an empty tensor unless all specified input sizes are non-zero"}function getSparseReshapeInputOutputMultipleErrorMessage(t,r){let a=sizeFromShape(t),n=sizeFromShape(r);return`Input to reshape is a SparseTensor with ${a} dense values, but the requested shape requires a multiple of ${n}. inputShape=${t} outputShape= ${r}`}function getSparseReshapeInputOutputMismatchErrorMessage(t,r){let a=sizeFromShape(t),n=sizeFromShape(r);return`Input to reshape is a tensor with ${a} dense values, but the requested shape has ${n}. 
inputShape=${t} outputShape=${r}`}function getSparseSegmentReductionNegativeSegmentIdsErrorMessage(){return"segment ids must be >= 0"}function getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage(){return"segment ids are not increasing"}function getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage(t,r){return`Segment id ${t} out of range [0, ${r}), possibly because segmentIds input is not sorted.`}function getSparseSegmentReductionIndicesOutOfRangeErrorMessage(t,r,a){return`Bad: indices[${t}] == ${r} out of range [0, ${a})`}function segOpComputeOptimalWindowSize(t,r){let a,n=!1;for(t<=o_?(a=t,n=!0):a=nearestDivisor(t,Math.floor(Math.sqrt(t)));!n;)a>r||a===t?n=!0:a=nearestDivisor(t,a+1);return a}function segment_util_computeOutShape(t,r,a){let n=[],s=t.length;for(let i=0;is))throw Error(`Expect batchDims in the range of [-${s}, ${s}], but got ${n}`);if(n<0&&(n+=s),n>i)throw Error(`batchDims (${n}) must be less than rank(x) ( ${i}).`);if(adecodeString(t))}catch(t){throw Error(`Failed to decode encoded string bytes into utf-8, error: ${t}`)}}function fromStringArrayToUint8(t){return t.map(t=>encodeString(t))}registerOptimizers();let o$={kernelName:"Abs",inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aD(t,iy(aE(a,"float32"),-1))}}},oR=op({avgPool3dGrad_:function(t,r,a,n,s,i){let o=tensor_util_env_convertToTensor(t,"dy","avgPool3dGrad"),l=tensor_util_env_convertToTensor(r,"input","avgPool3dGrad"),u=o,p=l,m=!1;4===l.rank&&(m=!0,u=a6(o,[1,o.shape[0],o.shape[1],o.shape[2],o.shape[3]]),p=a6(l,[1,l.shape[0],l.shape[1],l.shape[2],l.shape[3]])),assert(5===u.rank,()=>`Error in avgPool3dGrad: dy must be rank 5 but got rank ${u.rank}.`),assert(5===p.rank,()=>`Error in avgPool3dGrad: input must be rank 5 but got rank ${p.rank}.`),checkPadOnDimRoundingMode("avgPool3dGrad",s,i);let y={dy:u,input:p},_=ay.runKernel(e0,y,{filterSize:a,strides:n,pad:s,dimRoundingMode:i});return 
m?a6(_,[_.shape[1],_.shape[2],_.shape[3],_.shape[4]]):_}}),oF=op({avgPoolGrad_:function(t,r,a,n,s){let i=tensor_util_env_convertToTensor(t,"dy","avgPoolGrad"),o=tensor_util_env_convertToTensor(r,"input","avgPoolGrad");assert(o.rank===i.rank,()=>`Rank of input (${o.rank}) does not match rank of dy (${i.rank})`);let l=o,u=i,p=!1;3===o.rank&&(p=!0,l=a6(o,[1,o.shape[0],o.shape[1],o.shape[2]]),u=a6(i,[1,i.shape[0],i.shape[1],i.shape[2]])),assert(4===u.rank,()=>`Error in avgPoolGrad: dy must be rank 4 but got rank ${u.rank}.`),assert(4===l.rank,()=>`Error in avgPoolGrad: input must be rank 4 but got rank ${l.rank}.`);let m={dy:u,input:l},y=ay.runKernel(eJ,m,{filterSize:a,strides:n,pad:s});return p?a6(y,[y.shape[1],y.shape[2],y.shape[3]]):y}}),oD={kernelName:te,inputsToSave:["x"],gradFunc:o$.gradFunc},oP=op({conv3DBackpropFilter_:function(t,r,a,n,s){let i=t;4===t.rank&&(i=a6(t,[1,t.shape[0],t.shape[1],t.shape[2],t.shape[3]]));let o=r;4===o.rank&&(o=a6(r,[1,r.shape[0],r.shape[1],r.shape[2],r.shape[3]])),assert(5===i.rank,()=>`Error in conv3dDerFilter: input must be rank 5, but got shape ${i.shape}.`),assert(5===o.rank,()=>`Error in conv3dDerFilter: dy must be rank 5, but got shape ${o.shape}.`),assert(5===a.length,()=>`Error in conv3dDerFilter: filterShape must be length 5, but got ${a}.`),assert(i.shape[4]===a[3],()=>`Error in conv3dDerFilter: depth of input ${i.shape[4]}) must match input depth in filter (${a[3]}.`),assert(o.shape[4]===a[4],()=>`Error in conv3dDerFilter: depth of dy (${o.shape[4]}) must match output depth for filter (${a[4]}).`);let l={x:i,dy:o};return ay.runKernel(to,l,{strides:n,pad:s,filterShape:a})}});function arrayRange(t,r){let a=[];for(let n=t;naD(t,aE(nM(a,r),t.dtype))}}let oM={kernelName:"Max",inputsToSave:["x"],outputsToSave:[!0],gradFunc:(t,r,a)=>{let{reductionIndices:n}=a,s=r[0],i=r[1],o=parseAxisParam(n,s.shape),l=gradForMinAndMax(t,i,s,o);return{x:()=>l.x()}}},oL=op({maxPool3dGrad_:function(t,r,a,n,s,i,o){let 
l=tensor_util_env_convertToTensor(t,"dy","maxPool3dGrad"),u=tensor_util_env_convertToTensor(r,"input","maxPool3dGrad"),p=tensor_util_env_convertToTensor(a,"output","maxPool3dGrad"),m=l,y=u,_=p,w=!1;4===u.rank&&(w=!0,m=a6(l,[1,l.shape[0],l.shape[1],l.shape[2],l.shape[3]]),y=a6(u,[1,u.shape[0],u.shape[1],u.shape[2],u.shape[3]]),_=a6(p,[1,p.shape[0],p.shape[1],p.shape[2],p.shape[3]])),assert(5===m.rank,()=>`Error in maxPool3dGrad: dy must be rank 5 but got rank ${m.rank}.`),assert(5===y.rank,()=>`Error in maxPool3dGrad: input must be rank 5 but got rank ${y.rank}.`),assert(5===_.rank,()=>`Error in maxPool3dGrad: output must be rank 5 but got rank ${_.rank}.`),checkPadOnDimRoundingMode("maxPool3dGrad",i,o);let I={dy:m,input:y,output:_},C=ay.runKernel(t4,I,{filterSize:n,strides:s,pad:i,dimRoundingMode:o});return w?a6(C,[C.shape[1],C.shape[2],C.shape[3],C.shape[4]]):C}}),oz=op({maxPoolGrad_:function(t,r,a,n,s,i,o){let l=tensor_util_env_convertToTensor(t,"dy","maxPoolGrad"),u=tensor_util_env_convertToTensor(r,"input","maxPoolGrad"),p=tensor_util_env_convertToTensor(a,"output","maxPoolGrad");return assert(u.rank===l.rank,()=>`Rank of input (${u.rank}) does not match rank of dy (${l.rank})`),assert(4===l.rank,()=>`Error in maxPoolGrad: dy must be rank 4 but got rank ${l.rank}.`),assert(4===u.rank,()=>`Error in maxPoolGrad: input must be rank 4 but got rank ${u.rank}.`),checkPadOnDimRoundingMode("maxPoolGrad",i,o),ay.runKernel(t2,{dy:l,input:u,output:p},{filterSize:n,strides:s,pad:i,dimRoundingMode:o})}}),oV={kernelName:ru,inputsToSave:["x"],gradFunc:(t,r,a)=>{let n=r[0],{paddings:s}=a,i=s.map(t=>t[0]);return{x:()=>nt(t,i,n.shape)}}};function prodGradFn_(t,r,a){let n=t.shape.slice();n[a]=1;let s=a6(r,n),i=aD(nA(t,a,!0,!1),nA(t,a,!0,!0));return aD(s,i)}function prodsGradFn_(t,r,a){let n=t.shape.length,s=n-a.length,i=getAxesPermutation(a,n),o=t;null!=i&&(o=iI(t,i));let l=o.shape.slice(),u=l.splice(n-a.length,a.length).reduce((t,r)=>t*r,1);l.push(u);let 
p=prodGradFn_(o.reshape(l),r,s);return p=p.reshape(o.shape),null!=i&&(p=iI(p,getUndoAxesPermutation(i))),p}let oB={kernelName:rV,gradFunc:(t,r,a)=>{let{blockShape:n,paddings:s}=a;return{x:()=>ns(t,n,s)}}},oW={kernelName:rB,gradFunc:(t,r,a)=>{let{axis:n}=a;return{x:()=>a7(t,n)}}};function gatherDropNegatives(t,r){let a=n2(t,aU(r,aM(r))),n=n4(r,scalar_scalar(0,"int32")),s=a.rank-n.rank;for(let t=0;t{let[a]=r;return{x:()=>{let r=aO(aE(a,"float32"));return si(aF(t,aP(aB(scalar_scalar(1),r))))}}}},{kernelName:eW,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aF(t,aP(aB(aO(aE(a,"float32")),1)))}}},{kernelName:"Add",inputsToSave:["a","b"],gradFunc:(t,r)=>{let[a,n]=r,s=assertAndGetBroadcastShape(a.shape,n.shape);return{a:()=>{let r=t,n=getReductionAxes(a.shape,s);return n.length>0&&(r=nH(r,n)),a6(r,a.shape)},b:()=>{let r=t,a=getReductionAxes(n.shape,s);return a.length>0&&(r=nH(r,a)),a6(r,n.shape)}}}},{kernelName:eU,saveAllInputs:!0,gradFunc:(t,r)=>{let a={};return r.forEach((r,n)=>{a[n]=()=>t.clone()}),a}},{kernelName:eG,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aM(a)}}},{kernelName:ej,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aM(a)}}},{kernelName:eK,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aF(t,aP(aB(scalar_scalar(1),aO(aE(a,"float32")))))}}},{kernelName:eH,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aF(t,aP(a$(scalar_scalar(1),aO(aE(a,"float32")))))}}},{kernelName:eY,inputsToSave:["a","b"],gradFunc:(t,r)=>{let[a,n]=r,s=assertAndGetBroadcastShape(a.shape,n.shape);return{a:()=>{let r=a$(aO(a),aO(n)),i=aD(t,aF(n,r)),o=getReductionAxes(a.shape,s);return o.length>0&&(i=nH(i,o)),a6(i,a.shape)},b:()=>{let r=a$(aO(a),aO(n)),i=si(aD(t,aF(a,r))),o=getReductionAxes(n.shape,s);return 
o.length>0&&(i=nH(i,o)),a6(i,n.shape)}}}},{kernelName:eq,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aF(t,a$(aO(aE(a,"float32")),1))}}},{kernelName:eX,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aF(t,aB(scalar_scalar(1),aO(aE(a,"float32"))))}}},{kernelName:eQ,inputsToSave:["x"],gradFunc:(t,r,a)=>{let[n]=r,{filterSize:s,strides:i,pad:o,dimRoundingMode:l}=a;return{x:()=>oR(t,n,s,i,o,l)}}},{kernelName:eZ,inputsToSave:["x"],gradFunc:(t,r,a)=>{let[n]=r,{filterSize:s,strides:i,pad:o}=a;return{x:()=>oF(t,n,s,i,o)}}},{kernelName:e1,inputsToSave:["a","b"],gradFunc:(t,r,a)=>{let[n,s]=r,{transposeA:i,transposeB:o}=a;return i||o?!i&&o?{a:()=>a9(t,s,!1,!1),b:()=>a9(t,n,!0,!1)}:i&&!o?{a:()=>a9(s,t,!1,!0),b:()=>a9(n,t,!1,!1)}:{a:()=>a9(s,t,!0,!0),b:()=>a9(t,n,!0,!0)}:{a:()=>a9(t,s,!1,!0),b:()=>a9(n,t,!0,!1)}}},{kernelName:e2,gradFunc:(t,r,a)=>{let{blockShape:n,crops:s}=a;return{x:()=>sO(t,n,s)}}},{kernelName:"BroadcastTo",gradFunc:(t,r,a)=>{let n=a.inputShape,s=a.shape,i=Array.from(s);for(let t=n.length-1;t>=0;t--)if(n[t]===s[t])i[t]=1;else if(1!==n[t])throw Error(`broadcastTo(): [${n}] cannot be broadcast to [${s}].`);let o=[];for(let t=0;t1&&o.push(t);return{x:()=>nH(t,o,!0)}}},{kernelName:e5,gradFunc:t=>({x:()=>t.clone()})},{kernelName:e8,gradFunc:t=>({x:()=>aM(t)})},{kernelName:e7,inputsToSave:["x"],gradFunc:(t,r,a)=>{let[n]=r,{clipValueMin:s,clipValueMax:i}=a;return{x:()=>nL(sh(n4(n,s),st(n,i)),t,aM(t))}}},oD,{kernelName:tt,saveAllInputs:!0,gradFunc:(t,r,a)=>{let n=r.map(t=>t.shape),{axis:s}=a,i=parseAxisParam(s,r[0].shape)[0];return ih(t,n.map(t=>t[i]),i).map(t=>()=>t)}},{kernelName:ts,inputsToSave:["dy","filter"],gradFunc:(t,r,a)=>{let[n,s]=r,{strides:i,pad:o,dataFormat:l,dimRoundingMode:u}=a;return{dy:()=>n_(t,s,i,o,l,1,u),filter:()=>iR(t,n,s.shape,i,o,l,u)}}},{kernelName:tr,inputsToSave:["x","filter"],gradFunc:(t,r,a)=>{let[n,s]=r,{dilations:i,strides:o,pad:l,dataFormat:u}=a;return assert(tupleValuesAreOne(i),()=>`Error in gradient of conv2D: 
dilation rates greater than 1 are not yet supported in gradients. Got dilations '${i}'`),{x:()=>nk(n.shape,t,s,o,l,u),filter:()=>iR(n,t,s.shape,o,l,u)}}},{kernelName:ti,inputsToSave:["x","filter"],gradFunc:(t,r,a)=>{let{dilations:n,strides:s,pad:i}=a;assert(tupleValuesAreOne(n),()=>`Error in gradient of conv3D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${n}'`);let[o,l]=r;return{x:()=>nI(o.shape,t,l,s,i),filter:()=>oP(o,t,l.shape,s,i)}}},{kernelName:"Cos",inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aD(si(ie(aE(a,"float32"))),t)}}},{kernelName:tu,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aD(it(aE(a,"float32")),t)}}},{kernelName:th,inputsToSave:["x"],gradFunc:(t,r,a)=>{let[n]=r,{axis:s,exclusive:i,reverse:o}=a;return{x:()=>{let r=getAxesPermutation([s],n.rank),a=n$(t,s,i,!o);return null!=r&&(a=iI(a,r)),a}}}},{kernelName:tf,inputsToSave:["x","filter"],gradFunc:(t,r,a)=>{let{dilations:n,strides:s,pad:i,dimRoundingMode:o}=a,l=null==n?[1,1]:n;assert(tupleValuesAreOne(l),()=>`Error in gradient of depthwiseConv2dNative: dilation rates greater than 1 are not yet supported. Got dilations '${l}'`);let[u,p]=r;return assert(4===u.rank,()=>`Error in gradient of depthwiseConv2dNative: input must be rank 4, but got rank ${u.rank}.`),assert(4===p.rank,()=>`Error in gradient of depthwiseConv2dNative: filter must be rank 4, but got rank ${p.rank}.`),assert(u.shape[3]===p.shape[2],()=>`Error in gradient of depthwiseConv2d: number of input channels (${u.shape[3]}) must match the inChannels dimension in filter ${p.shape[2]}.`),assert(eitherStridesOrDilationsAreOne(s,l),()=>`Error in gradient of depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${s} and dilations '${l}'.`),checkPadOnDimRoundingMode("depthwiseConv2d",i,o),{x:()=>iP(u.shape,t,p,s,i,l,o),filter:()=>iD(u,t,p.shape,s,i,l,o)}}},{kernelName:tv,inputsToSave:["x","filter"],gradFunc:(t,r,a)=>{let[n,s]=r,i={x:n,filter:s,dy:t},o={x:n,filter:s,dy:t};return{x:()=>ay.runKernel(t_,i,a),filter:()=>ay.runKernel(tT,o,a)}}},{kernelName:tS,inputsToSave:["a","b"],gradFunc:(t,r)=>{let[a,n]=r,s=assertAndGetBroadcastShape(a.shape,n.shape);return{a:()=>{let r=aF(t,aE(n,"float32")),i=getReductionAxes(a.shape,s);return i.length>0?a6(nH(r,i),a.shape):r},b:()=>{let r=aD(t,aE(a,"float32")),i=getReductionAxes(n.shape,s);return i.length>0&&(r=a6(nH(r,i),n.shape)),si(aF(r,aE(aO(n),"float32")))}}}},{kernelName:"Elu",outputsToSave:[!0],gradFunc:(t,r)=>{let[a]=r,n={dy:t,y:a};return{x:()=>ay.runKernel(tI,n)}}},{kernelName:"Erf",inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r,n=aD(nY(si(aO(a))),2/Math.sqrt(Math.PI));return{x:()=>aD(t,n)}}},{kernelName:"Exp",outputsToSave:[!0],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aD(t,a)}}},{kernelName:tC,inputsToSave:["input"],gradFunc:(t,r)=>{let[a]=r;return{input:()=>a6(t,a.shape)}}},{kernelName:tE,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aD(t,nY(a))}}},{kernelName:tF,inputsToSave:["a","b"],gradFunc:(t,r)=>{let[a,n]=r,s=assertAndGetBroadcastShape(a.shape,n.shape);return{a:()=>{let r=aF(t,aE(n,"float32")),i=getReductionAxes(a.shape,s);return i.length>0?a6(nH(r,i),a.shape):r},b:()=>{let r=aD(t,aE(a,"float32")),i=getReductionAxes(n.shape,s);return i.length>0&&(r=a6(nH(r,i),n.shape)),si(aF(r,aE(aO(n),"float32")))}}}},{kernelName:tR,gradFunc:t=>({x:()=>aM(t)})},{kernelName:tD,inputsToSave:["x","mean","variance","scale"],gradFunc:(t,r,a)=>{let{varianceEpsilon:n}=a,[s,i,o,l]=r,u=null==l?scalar_scalar(1):l,p=getReductionAxes(i.shape,s.shape),m=[];if(1===i.rank){for(let t=0;t1===i.rank?a6(aD(aD(t,nQ(a6(w,[1,1,1,i.shape[0]]),m)),u),s.shape):a6(aD(aD(t,w),u),s.shape),mean:()=>{let t=aD(aD(w,scalar_scalar(-1)),_);return 
1===i.rank&&(t=nH(t,p)),a6(t,i.shape)},variance:()=>{let t=aD(aD(I,y),_);return 1===i.rank&&(t=nH(t,p)),a6(t,i.shape)},scale:()=>{let r=aD(y,w),a=aD(t,r);return 1===i.rank&&(a=nH(a,p)),a6(a,i.shape)},offset:()=>{let r=t;return 1===i.rank&&(r=nH(r,p)),a6(r,i.shape)}}}},{kernelName:tP,inputsToSave:["x","indices"],gradFunc:(t,r,a)=>{let[n,s]=r,{axis:i,batchDims:o}=a,l=parseAxisParam(i,n.shape)[0],derXBatch=(t,r,a)=>()=>{let n=t.shape,s=r.size,o=n.slice(0,l),u=o.length,p=n.slice(i,n.length).slice(1),m=p.length,y=arrayRange(0,u),_=arrayRange(u+1,u+1+m),w=a6(a,arrayConcat([o,[s],p])),I=a6(r,[s]),C=arrayConcat([[u],y,_]),E=iS(iI(w,C),I,t.shape[l]);return iI(E,getUndoAxesPermutation(C))};if(1!==o)return{x:derXBatch(n,s,t),indices:()=>s};{let r=n.shape[0],a=n.split(r,0);return{x:()=>ig(a.map((r,a)=>derXBatch(r,s.slice(a,1),t.slice(a,1))())).reshape(n.shape),indices:()=>s}}}},{kernelName:tL,inputsToSave:["a","b"],gradFunc:(t,r)=>{let[a,n]=r;return{a:()=>aM(a),b:()=>aM(n)}}},{kernelName:tz,gradFunc:t=>({x:()=>aE(t,"float32")})},{kernelName:tW,gradFunc:t=>({x:()=>aM(t)})},{kernelName:tU,gradFunc:t=>({x:()=>aM(t)})},{kernelName:tG,gradFunc:t=>({x:()=>aM(t)})},{kernelName:tj,inputsToSave:["x"],gradFunc:(t,r,a)=>{let[n]=r,{alpha:s}=a,i=n3(n,0);return{x:()=>nL(i,t,aD(t,s))}}},{kernelName:tX,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aF(t,a$(a,1))}}},{kernelName:"Log",inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aF(t,aE(a,"float32"))}}},{kernelName:"LogSoftmax",inputsToSave:[],outputsToSave:[!0],gradFunc:(t,r,a)=>{let[n]=r,{axis:s}=a;return{logits:()=>{let r=nY(n);return 
aB(t,aD(nH(t,s,!0),r))}}}},{kernelName:"LRN",inputsToSave:["x"],outputsToSave:[!0],gradFunc:(t,r,a)=>{let[n,s]=r,{depthRadius:i,bias:o,alpha:l,beta:u}=a;return{x:()=>oO(n,s,t,i,o,l,u)}}},oM,oM,{kernelName:t0,inputsToSave:["a","b"],gradFunc:(t,r)=>{let[a,n]=r;return{a:()=>aD(t,aE(n4(a,n),"float32")),b:()=>aD(t,aE(se(a,n),"float32"))}}},{kernelName:t3,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(t,r,a)=>{let[n,s]=r,{filterSize:i,strides:o,pad:l,dimRoundingMode:u}=a;return{x:()=>oL(t,n,s,i,o,l,u)}}},{kernelName:t1,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(t,r,a)=>{let[n,s]=r,{filterSize:i,strides:o,pad:l}=a;return{x:()=>oz(t,n,s,i,o,l)}}},{kernelName:t5,inputsToSave:["x"],gradFunc:(t,r,a)=>{let[n]=r,{axis:s}=a,i=parseAxisParam(s,n.shape),o=sizeFromShape(computeOutAndReduceShapes(n.shape,i)[1]);return{x:()=>{let r=n.shape.slice();return i.forEach(t=>{r[t]=1}),aF(aD(a6(t,r),ones_ones(n.shape,"float32")),o)}}}},{kernelName:"Min",inputsToSave:["x"],outputsToSave:[!0],gradFunc:(t,r,a)=>{let{axis:n}=a,[s,i]=r,o=parseAxisParam(n,s.shape),l=gradForMinAndMax(t,i,s,o);return{x:()=>l.x()}}},{kernelName:t8,inputsToSave:["a","b"],gradFunc:(t,r)=>{let[a,n]=r;return{a:()=>aD(t,aE(st(a,n),"float32")),b:()=>aD(t,aE(n3(a,n),"float32"))}}},{kernelName:t7,inputsToSave:["x"],gradFunc:(t,r,a)=>{let n=r[0],{paddings:s}=a,i=s.map(t=>t[0]);return{x:()=>nt(t,i,n.shape)}}},{kernelName:"Mod",inputsToSave:["a","b"],gradFunc:(t,r)=>{let[a,n]=r,s=assertAndGetBroadcastShape(a.shape,n.shape);return{a:()=>{let r=getReductionAxes(a.shape,s);return r.length>0?a6(nH(t,r),a.shape):t},b:()=>{let r=aD(t,si(n1(aF(a,n)))),i=getReductionAxes(n.shape,s);return i.length>0?a6(nH(r,i),n.shape):r}}}},{kernelName:re,inputsToSave:["a","b"],gradFunc:(t,r)=>{let[a,n]=r,s=assertAndGetBroadcastShape(a.shape,n.shape);return{a:()=>{let r=aD(t,aE(n,"float32")),i=getReductionAxes(a.shape,s);return i.length>0?a6(nH(r,i),a.shape):r},b:()=>{let r=aD(t,aE(a,"float32")),i=getReductionAxes(n.shape,s);return 
i.length>0?a6(nH(r,i),n.shape):r}}}},{kernelName:"Neg",gradFunc:t=>({x:()=>si(t)})},{kernelName:ro,inputsToSave:["indices"],gradFunc:(t,r)=>{let a=r[0];return{indices:()=>zeros(a.shape,"float32")}}},{kernelName:ri,gradFunc:t=>({x:()=>aM(t)})},{kernelName:rl,saveAllInputs:!0,gradFunc:(t,r,a)=>{let{axis:n}=a;return iw(t,n).map(t=>()=>t)}},oV,oV,{kernelName:"Pow",inputsToSave:["a","b"],outputsToSave:[!0],gradFunc:(t,r)=>{let[a,n,s]=r,i=assertAndGetBroadcastShape(a.shape,n.shape);return{a:()=>{let r=aE(n,"float32"),s=aD(t,aD(r,aV(a,aB(r,scalar_scalar(1))))),o=getReductionAxes(a.shape,i);return o.length>0&&(s=nH(s,o)),a6(s,a.shape)},b:()=>{let r=aD(t,aD(s,nL(n3(a,0),sn(a),aM(a)))),o=getReductionAxes(n.shape,i);return o.length>0&&(r=nH(r,o)),a6(r,n.shape)}}}},{kernelName:rp,inputsToSave:["x","alpha"],gradFunc:(t,r)=>{let[a,n]=r,s=n3(a,0);return{x:()=>nL(s,t,aD(t,n)),alpha:()=>{let r=nL(s,aM(t),aD(t,a)),i=getReductionAxes(n.shape,t.shape);return i.length>0&&(r=nH(r,i)),a6(r,n.shape)}}}},{kernelName:rh,inputsToSave:["x"],gradFunc:(t,r,a)=>{let[n]=r,{axis:s}=a,i=[];return i=null==s?n.shape.map((t,r)=>r):"number"==typeof 
s?[s]:s,{x:()=>prodsGradFn_(n,t,i)}}},{kernelName:ry,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aF(t,si(aO(a)))}}},{kernelName:rw,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r,n=aD(st(a,6),iy(a));return{x:()=>aD(t,aE(n,"float32"))}}},{kernelName:rx,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aD(t,aE(iy(a),"float32"))}}},{kernelName:rv,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>a6(t,a.shape)}}},{kernelName:rk,inputsToSave:["images"],gradFunc:(t,r,a)=>{let[n]=r,s={dy:t,images:n};return{images:()=>ay.runKernel(rS,s,a)}}},{kernelName:r_,inputsToSave:["images"],gradFunc:(t,r,a)=>{let[n]=r,s={dy:t,images:n};return{images:()=>ay.runKernel(rT,s,a)}}},{kernelName:rI,gradFunc:(t,r,a)=>{let{dims:n}=a,s=parseAxisParam(n,t.shape);return{x:()=>s0(t,s)}}},{kernelName:rN,gradFunc:t=>({x:()=>aM(t)})},{kernelName:rC,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>si(aF(t,aD(aV(a,1.5),2)))}}},{kernelName:rR,inputsToSave:["condition"],gradFunc:(t,r)=>{let[a]=r;return{condition:()=>aE(aM(a),"float32"),t:()=>aD(t,aE(a,t.dtype)),e:()=>aD(t,aE(sc(a),t.dtype))}}},{kernelName:rF,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>{let r=n3(a,scalar_scalar(0)),n=scalar_scalar(oT);return nL(r,aD(t,scalar_scalar(ok)),aD(aD(t,n),nY(aE(a,"float32"))))}}}},{kernelName:rM,outputsToSave:[!0],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aD(t,aD(a,aB(scalar_scalar(1),a)))}}},{kernelName:rO,gradFunc:t=>({x:()=>aM(t)})},{kernelName:"Sin",inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aD(nC(aE(a,"float32")),t)}}},{kernelName:rP,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aD(nE(aE(a,"float32")),t)}}},{kernelName:rD,inputsToSave:["x"],gradFunc:(t,r,a)=>{let[n]=r,{begin:s,size:i}=a,o=n.shape,[l,u]=parseSliceParams(n,s,i),p=[];for(let 
r=0;rs$(t,p)}}},{kernelName:rW,outputsToSave:[!0],gradFunc:(t,r,a)=>{let[n]=r,{dim:s}=a,i=aD(t,n);return{logits:()=>aB(i,aD(nH(i,[s],!0),n))}}},{kernelName:rL,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aD(t,ne(a))}}},oB,oB,oW,oW,{kernelName:rz,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aF(t,aD(aP(aE(a,"float32")),2))}}},{kernelName:rq,inputsToSave:["a","b"],gradFunc:(t,r)=>{let[a,n]=r,s=scalar_scalar(2);return{a:()=>aD(t,aD(s,aB(a,n))),b:()=>aD(t,aD(s,aB(n,a)))}}},{kernelName:rX,inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aD(t,aD(aE(a,"float32"),2))}}},{kernelName:ae,gradFunc:t=>({x:()=>aM(t)})},{kernelName:"Sub",inputsToSave:["a","b"],gradFunc:(t,r)=>{let[a,n]=r,s=assertAndGetBroadcastShape(a.shape,n.shape);return{a:()=>{let r=t,n=getReductionAxes(a.shape,s);return n.length>0&&(r=nH(r,n)),a6(r,a.shape)},b:()=>{let r=t,a=getReductionAxes(n.shape,s);return a.length>0&&(r=nH(r,a)),a6(si(r),n.shape)}}}},{kernelName:"Sum",inputsToSave:["x"],gradFunc:(t,r,a)=>{let[n]=r,s=n.shape.slice(),{axis:i}=a;parseAxisParam(i,n.shape).forEach(t=>{s[t]=1});let o=aD(a6(t,s),ones_ones(n.shape,"float32"));return{x:()=>o}}},{kernelName:"Tan",inputsToSave:["x"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aF(t,aO(nC(a)))}}},{kernelName:r1,outputsToSave:[!0],gradFunc:(t,r)=>{let[a]=r;return{x:()=>aD(aB(scalar_scalar(1),aO(a)),t)}}},{kernelName:r2,inputsToSave:["x"],gradFunc:(t,r,a)=>{let[n]=r,{reps:s}=a;return{x:()=>{let r=aM(n);if(1===n.rank)for(let a=0;a{let{perm:n}=a,s=getUndoAxesPermutation(n);return{x:()=>iI(t,s)}}},{kernelName:r8,gradFunc:(t,r,a)=>{let{axis:n}=a;return{value:()=>ig(t,n)}}},{kernelName:r7,inputsToSave:["segmentIds"],gradFunc:(t,r)=>{let[a]=r;return{x:()=>gatherDropNegatives(t,a)}}},{kernelName:r9,gradFunc:t=>({x:()=>aM(t)})}])registerGradient(t);getGlobalTensorClass().prototype.abs=function(){return this.throwIfDisposed(),aW(this)},getGlobalTensorClass().prototype.acos=function(){return 
this.throwIfDisposed(),aH(this)},getGlobalTensorClass().prototype.acosh=function(){return this.throwIfDisposed(),aq(this)},getGlobalTensorClass().prototype.add=function(t){return this.throwIfDisposed(),a$(this,t)},getGlobalTensorClass().prototype.all=function(t,r){return this.throwIfDisposed(),aY(this,t,r)},getGlobalTensorClass().prototype.any=function(t,r){return this.throwIfDisposed(),aZ(this,t,r)},getGlobalTensorClass().prototype.argMax=function(t){return this.throwIfDisposed(),aJ(this,t)},getGlobalTensorClass().prototype.argMin=function(t){return this.throwIfDisposed(),aQ(this,t)},getGlobalTensorClass().prototype.asScalar=function(){return this.throwIfDisposed(),assert(1===this.size,()=>"The array must have only 1 element."),a6(this,[])},getGlobalTensorClass().prototype.asType=function(t){return this.throwIfDisposed(),aE(this,t)},getGlobalTensorClass().prototype.as1D=function(){return this.throwIfDisposed(),a6(this,[this.size])},getGlobalTensorClass().prototype.as2D=function(t,r){return this.throwIfDisposed(),a6(this,[t,r])},getGlobalTensorClass().prototype.as3D=function(t,r,a){return this.throwIfDisposed(),a6(this,[t,r,a])},getGlobalTensorClass().prototype.as4D=function(t,r,a,n){return this.throwIfDisposed(),a6(this,[t,r,a,n])},getGlobalTensorClass().prototype.as5D=function(t,r,a,n,s){return this.throwIfDisposed(),a6(this,[t,r,a,n,s])},getGlobalTensorClass().prototype.asin=function(){return this.throwIfDisposed(),a0(this)},getGlobalTensorClass().prototype.asinh=function(){return this.throwIfDisposed(),a1(this)},getGlobalTensorClass().prototype.atan=function(){return this.throwIfDisposed(),a2(this)},getGlobalTensorClass().prototype.atan2=function(t){return this.throwIfDisposed(),a3(this,t)},getGlobalTensorClass().prototype.atanh=function(){return this.throwIfDisposed(),a4(this)},getGlobalTensorClass().prototype.avgPool=function(t,r,a,n){return this.throwIfDisposed(),a5(this,t,r,a,n)},getGlobalTensorClass().prototype.batchToSpaceND=function(t,r){return 
this.throwIfDisposed(),ns(this,t,r)},getGlobalTensorClass().prototype.batchNorm=function(t,r,a,n,s){return this.throwIfDisposed(),ni(this,t,r,a,n,s)},getGlobalTensorClass().prototype.broadcastTo=function(t){return this.throwIfDisposed(),nd(this,t)},getGlobalTensorClass().prototype.cast=function(t){return this.throwIfDisposed(),aE(this,t)},getGlobalTensorClass().prototype.ceil=function(){return this.throwIfDisposed(),nm(this)},getGlobalTensorClass().prototype.clipByValue=function(t,r){return this.throwIfDisposed(),nf(this,t,r)},getGlobalTensorClass().prototype.concat=function(t,r){return this.throwIfDisposed(),t instanceof tensor_Tensor&&(t=[t]),a7([this,...t],r)},getGlobalTensorClass().prototype.conv1d=function(t,r,a,n,s,i){return this.throwIfDisposed(),nT(this,t,r,a,n,s,i)},getGlobalTensorClass().prototype.conv2dTranspose=function(t,r,a,n,s){return this.throwIfDisposed(),nS(this,t,r,a,n,s)},getGlobalTensorClass().prototype.conv2d=function(t,r,a,n,s,i){return this.throwIfDisposed(),n_(this,t,r,a,n,s,i)},getGlobalTensorClass().prototype.cos=function(){return this.throwIfDisposed(),nC(this)},getGlobalTensorClass().prototype.cosh=function(){return this.throwIfDisposed(),nE(this)},getGlobalTensorClass().prototype.cumprod=function(t,r,a){return this.throwIfDisposed(),nA(this,t,r,a)},getGlobalTensorClass().prototype.cumsum=function(t,r,a){return this.throwIfDisposed(),n$(this,t,r,a)},getGlobalTensorClass().prototype.depthToSpace=function(t,r){return this.throwIfDisposed(),nF(this,t,r)},getGlobalTensorClass().prototype.depthwiseConv2d=function(t,r,a,n,s,i){return this.throwIfDisposed(),nD(this,t,r,a,n,s,i)},getGlobalTensorClass().prototype.dilation2d=function(t,r,a,n,s){return this.throwIfDisposed(),nO(this,t,r,a,n,s)},getGlobalTensorClass().prototype.divNoNan=function(t){return this.throwIfDisposed(),nz(this,t)},getGlobalTensorClass().prototype.div=function(t){return this.throwIfDisposed(),aF(this,t)},getGlobalTensorClass().prototype.dot=function(t){return 
this.throwIfDisposed(),nV(this,t)},getGlobalTensorClass().prototype.elu=function(){return this.throwIfDisposed(),nW(this)},getGlobalTensorClass().prototype.equal=function(t){return this.throwIfDisposed(),nM(this,t)},getGlobalTensorClass().prototype.erf=function(){return this.throwIfDisposed(),nG(this)},getGlobalTensorClass().prototype.euclideanNorm=function(t,r){return this.throwIfDisposed(),nX(this,t,r)},getGlobalTensorClass().prototype.exp=function(){return this.throwIfDisposed(),nY(this)},getGlobalTensorClass().prototype.expandDims=function(t){return this.throwIfDisposed(),nZ(this,t)},getGlobalTensorClass().prototype.expm1=function(){return this.throwIfDisposed(),nJ(this)},getGlobalTensorClass().prototype.fft=function(){return this.throwIfDisposed(),il(this)},getGlobalTensorClass().prototype.flatten=function(){return this.throwIfDisposed(),a6(this,[this.size])},getGlobalTensorClass().prototype.floor=function(){return this.throwIfDisposed(),n1(this)},getGlobalTensorClass().prototype.floorDiv=function(t){return this.throwIfDisposed(),aR(this,t)},getGlobalTensorClass().prototype.gather=function(t,r,a){return this.throwIfDisposed(),n2(this,t,r,a)},getGlobalTensorClass().prototype.greaterEqual=function(t){return this.throwIfDisposed(),n4(this,t)},getGlobalTensorClass().prototype.greater=function(t){return this.throwIfDisposed(),n3(this,t)},getGlobalTensorClass().prototype.ifft=function(){return this.throwIfDisposed(),iu(this)},getGlobalTensorClass().prototype.irfft=function(){return this.throwIfDisposed(),ip(this)},getGlobalTensorClass().prototype.isFinite=function(){return this.throwIfDisposed(),n5(this)},getGlobalTensorClass().prototype.isInf=function(){return this.throwIfDisposed(),n8(this)},getGlobalTensorClass().prototype.isNaN=function(){return this.throwIfDisposed(),n7(this)},getGlobalTensorClass().prototype.leakyRelu=function(t){return this.throwIfDisposed(),n9(this,t)},getGlobalTensorClass().prototype.lessEqual=function(t){return 
this.throwIfDisposed(),st(this,t)},getGlobalTensorClass().prototype.less=function(t){return this.throwIfDisposed(),se(this,t)},getGlobalTensorClass().prototype.localResponseNormalization=function(t,r,a,n){return this.throwIfDisposed(),sr(this,t,r,a,n)},getGlobalTensorClass().prototype.logSigmoid=function(){return this.throwIfDisposed(),sl(this)},getGlobalTensorClass().prototype.logSoftmax=function(t){return this.throwIfDisposed(),su(this,t)},getGlobalTensorClass().prototype.logSumExp=function(t,r){return this.throwIfDisposed(),sp(this,t,r)},getGlobalTensorClass().prototype.log=function(){return this.throwIfDisposed(),sn(this)},getGlobalTensorClass().prototype.log1p=function(){return this.throwIfDisposed(),ss(this)},getGlobalTensorClass().prototype.logicalAnd=function(t){return this.throwIfDisposed(),sh(this,t)},getGlobalTensorClass().prototype.logicalNot=function(){return this.throwIfDisposed(),sc(this)},getGlobalTensorClass().prototype.logicalOr=function(t){return this.throwIfDisposed(),sd(this,t)},getGlobalTensorClass().prototype.logicalXor=function(t){return this.throwIfDisposed(),sm(this,t)},getGlobalTensorClass().prototype.matMul=function(t,r,a){return this.throwIfDisposed(),a9(this,t,r,a)},getGlobalTensorClass().prototype.maxPool=function(t,r,a,n){return this.throwIfDisposed(),sg(this,t,r,a,n)},getGlobalTensorClass().prototype.max=function(t,r){return this.throwIfDisposed(),nj(this,t,r)},getGlobalTensorClass().prototype.maximum=function(t){return this.throwIfDisposed(),aU(this,t)},getGlobalTensorClass().prototype.mean=function(t,r){return this.throwIfDisposed(),sv(this,t,r)},getGlobalTensorClass().prototype.min=function(t,r){return this.throwIfDisposed(),nK(this,t,r)},getGlobalTensorClass().prototype.minimum=function(t){return this.throwIfDisposed(),s_(this,t)},getGlobalTensorClass().prototype.mirrorPad=function(t,r){return this.throwIfDisposed(),sT(this,t,r)},getGlobalTensorClass().prototype.mod=function(t){return 
this.throwIfDisposed(),sk(this,t)},getGlobalTensorClass().prototype.mul=function(t){return this.throwIfDisposed(),aD(this,t)},getGlobalTensorClass().prototype.neg=function(){return this.throwIfDisposed(),si(this)},getGlobalTensorClass().prototype.norm=function(t,r,a){return this.throwIfDisposed(),nq(this,t,r,a)},getGlobalTensorClass().prototype.notEqual=function(t){return this.throwIfDisposed(),sN(this,t)},getGlobalTensorClass().prototype.oneHot=function(t,r=1,a=0){return this.throwIfDisposed(),sC(this,t,r,a)},getGlobalTensorClass().prototype.onesLike=function(){return this.throwIfDisposed(),sE(this)},getGlobalTensorClass().prototype.pad=function(t,r){return this.throwIfDisposed(),s$(this,t,r)},getGlobalTensorClass().prototype.pool=function(t,r,a,n,s,i){return this.throwIfDisposed(),sM(this,t,r,a,n,s,i)},getGlobalTensorClass().prototype.pow=function(t){return this.throwIfDisposed(),aV(this,t)},getGlobalTensorClass().prototype.prelu=function(t){return this.throwIfDisposed(),sL(this,t)},getGlobalTensorClass().prototype.prod=function(t,r){return this.throwIfDisposed(),sz(this,t,r)},getGlobalTensorClass().prototype.reciprocal=function(){return this.throwIfDisposed(),sZ(this)},getGlobalTensorClass().prototype.relu=function(){return this.throwIfDisposed(),sJ(this)},getGlobalTensorClass().prototype.relu6=function(){return this.throwIfDisposed(),sQ(this)},getGlobalTensorClass().prototype.reshapeAs=function(t){return this.throwIfDisposed(),a6(this,t.shape)},getGlobalTensorClass().prototype.reshape=function(t){return this.throwIfDisposed(),a6(this,t)},getGlobalTensorClass().prototype.resizeBilinear=function(t,r,a){return this.throwIfDisposed(),iY(this,t,r,a)},getGlobalTensorClass().prototype.resizeNearestNeighbor=function(t,r,a){return this.throwIfDisposed(),iZ(this,t,r,a)},getGlobalTensorClass().prototype.reverse=function(t){return this.throwIfDisposed(),s0(this,t)},getGlobalTensorClass().prototype.rfft=function(){return 
this.throwIfDisposed(),ic(this)},getGlobalTensorClass().prototype.round=function(){return this.throwIfDisposed(),s6(this)},getGlobalTensorClass().prototype.rsqrt=function(){return this.throwIfDisposed(),s5(this)},getGlobalTensorClass().prototype.selu=function(){return this.throwIfDisposed(),s8(this)},getGlobalTensorClass().prototype.separableConv2d=function(t,r,a,n,s,i){return this.throwIfDisposed(),s7(this,t,r,a,n,s,i)},getGlobalTensorClass().prototype.sigmoid=function(){return this.throwIfDisposed(),ne(this)},getGlobalTensorClass().prototype.sign=function(){return this.throwIfDisposed(),s9(this)},getGlobalTensorClass().prototype.sin=function(){return this.throwIfDisposed(),ie(this)},getGlobalTensorClass().prototype.sinh=function(){return this.throwIfDisposed(),it(this)},getGlobalTensorClass().prototype.slice=function(t,r){return this.throwIfDisposed(),nt(this,t,r)},getGlobalTensorClass().prototype.softmax=function(t){return this.throwIfDisposed(),io(this,t)},getGlobalTensorClass().prototype.softplus=function(){return this.throwIfDisposed(),so(this)},getGlobalTensorClass().prototype.spaceToBatchND=function(t,r){return this.throwIfDisposed(),sO(this,t,r)},getGlobalTensorClass().prototype.split=function(t,r){return this.throwIfDisposed(),ih(this,t,r)},getGlobalTensorClass().prototype.sqrt=function(){return this.throwIfDisposed(),aP(this)},getGlobalTensorClass().prototype.square=function(){return this.throwIfDisposed(),aO(this)},getGlobalTensorClass().prototype.squaredDifference=function(t){return this.throwIfDisposed(),id(this,t)},getGlobalTensorClass().prototype.squeeze=function(t){return this.throwIfDisposed(),im(this,t)},getGlobalTensorClass().prototype.stack=function(t,r){return this.throwIfDisposed(),ig(t instanceof tensor_Tensor?[this,t]:[this,...t],r)},getGlobalTensorClass().prototype.step=function(t){return this.throwIfDisposed(),iy(this,t)},getGlobalTensorClass().prototype.stridedSlice=function(t,r,a,n,s,i,o,l){return 
this.throwIfDisposed(),ib(this,t,r,a,n,s,i,o,l)},getGlobalTensorClass().prototype.sub=function(t){return this.throwIfDisposed(),aB(this,t)},getGlobalTensorClass().prototype.sum=function(t,r){return this.throwIfDisposed(),nH(this,t,r)},getGlobalTensorClass().prototype.tan=function(){return this.throwIfDisposed(),ix(this)},getGlobalTensorClass().prototype.tanh=function(){return this.throwIfDisposed(),nr(this)},getGlobalTensorClass().prototype.tile=function(t){return this.throwIfDisposed(),nQ(this,t)},getGlobalTensorClass().prototype.toBool=function(){return this.throwIfDisposed(),aE(this,"bool")},getGlobalTensorClass().prototype.toFloat=function(){return this.throwIfDisposed(),aE(this,"float32")},getGlobalTensorClass().prototype.toInt=function(){return this.throwIfDisposed(),aE(this,"int32")},getGlobalTensorClass().prototype.topk=function(t,r){return this.throwIfDisposed(),i_(this,t,r)},getGlobalTensorClass().prototype.transpose=function(t){return this.throwIfDisposed(),iI(this,t)},getGlobalTensorClass().prototype.unique=function(t){return this.throwIfDisposed(),ik(this,t)},getGlobalTensorClass().prototype.unsortedSegmentSum=function(t,r){return this.throwIfDisposed(),iS(this,t,r)},getGlobalTensorClass().prototype.unstack=function(t){return this.throwIfDisposed(),iw(this,t)},getGlobalTensorClass().prototype.where=function(t,r){return this.throwIfDisposed(),nL(t,this,r)},getGlobalTensorClass().prototype.zerosLike=function(){return this.throwIfDisposed(),aM(this)};let AttributeError=class AttributeError extends Error{constructor(t){super(t),Object.setPrototypeOf(this,AttributeError.prototype)}};let RuntimeError=class RuntimeError extends Error{constructor(t){super(t),Object.setPrototypeOf(this,RuntimeError.prototype)}};let errors_ValueError=class errors_ValueError extends Error{constructor(t){super(t),Object.setPrototypeOf(this,errors_ValueError.prototype)}};let errors_NotImplementedError=class errors_NotImplementedError extends 
Error{constructor(t){super(t),Object.setPrototypeOf(this,errors_NotImplementedError.prototype)}};let AssertionError=class AssertionError extends Error{constructor(t){super(t),Object.setPrototypeOf(this,AssertionError.prototype)}};let LruCache=class LruCache{constructor(t){this.maxEntries=t||100,this.cache=new Map}get(t){let r;return this.cache.has(t)&&(r=this.cache.get(t),this.cache.delete(t),this.cache.set(t,r)),r}put(t,r){if(this.cache.has(t))this.cache.delete(t);else if(this.cache.size>=this.maxEntries){let t=this.cache.keys().next().value;this.cache.delete(t)}this.cache.set(t,r)}getMaxEntries(){return this.maxEntries}setMaxEntries(t){if(t<0)throw Error(`The maxEntries of LRU caches must be at least 0, but got ${t}.`);if(this.maxEntries>t)for(let r=0;rr.toUpperCase())}let oU={};function serializeKerasObject(t){if(null==t)return null;let r={};return r.className=t.getClassName(),r.config=t.getConfig(),r}function convertNDArrayScalarsInConfig(t){if(null!=t&&"object"==typeof t)if(Array.isArray(t))t.forEach(t=>convertNDArrayScalarsInConfig(t));else for(let r of Object.keys(t)){let a=t[r];null!=a&&"object"==typeof a&&(Array.isArray(a)||"ndarray"!==a.type||"number"!=typeof a.value?convertNDArrayScalarsInConfig(a):t[r]=a.value)}}function deserializeKerasObject(t,r={},a={},n="object",s=!1){if("string"==typeof t){let s;if(t in a)s=a[t];else if(t in oU)s=oU[t];else if(null==(s=r[t]))throw new errors_ValueError(`Unknown ${n}: ${t}. This may be due to one of the following reasons: 1. The ${n} is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code. 2. The custom ${n} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);return s}{let i,o;if(null==t.className||null==t.config)throw new errors_ValueError(`${n}: Improper config format: ${JSON.stringify(t)}. 
'className' and 'config' must set.`);let l=t.className;if(l in a?[i,o]=a[l]:l in oU?[i,o]=oU.className:l in r&&([i,o]=r[l]),null==i)throw new errors_ValueError(`Unknown ${n}: ${l}. This may be due to one of the following reasons: 1. The ${n} is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code. 2. The custom ${n} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);if(null!=o){let r={};for(let t of Object.keys(oU))r[t]=oU[t];for(let t of Object.keys(a))r[t]=a[t];t.config.customObjects=r;let n=Object.assign({},oU);for(let t of Object.keys(a))oU[t]=a[t];convertNDArrayScalarsInConfig(t.config);let l=o(i,t.config,a,s);return oU=Object.assign({},n),l}{let r=Object.assign({},oU);for(let t of Object.keys(a))oU[t]=a[t];let n=new i(t.config);return oU=Object.assign({},r),n}}}function numberCompare(t,r){return tr)}function reverseNumberCompare(t,r){return -1*numberCompare(t,r)}function generic_utils_unique(t){if(null==t)return t;let r=[];for(let a of t)-1===r.indexOf(a)&&r.push(a);return r}function isObjectEmpty(t){if(null==t)throw new errors_ValueError(`Invalid value in obj: ${JSON.stringify(t)}`);for(let r in t)if(t.hasOwnProperty(r))return!1;return!0}function checkStringTypeUnionValue(t,r,a){if(null!=a&&0>t.indexOf(a))throw new errors_ValueError(`${a} is not a valid ${r}. 
Valid values are ${t} or null/undefined.`)}function checkArrayTypeAndLength(t,r,a=0,n=1/0){return generic_utils_assert(a>=0),generic_utils_assert(n>=a),Array.isArray(t)&&t.length>=a&&t.length<=n&&t.every(t=>typeof t===r)}function assertPositiveInteger(t,r){Array.isArray(t)?(assert(t.length>0,()=>`${r} is unexpectedly an empty array.`),t.forEach((t,a)=>assertPositiveInteger(t,`element ${a+1} of ${r}`))):assert(Number.isInteger(t)&&t>0,()=>`Expected ${r} to be a positive integer, but got ${formatAsFriendlyString(t)}.`)}function formatAsFriendlyString(t){return null===t?"null":Array.isArray(t)?"["+t.map(t=>formatAsFriendlyString(t)).join(",")+"]":"string"==typeof t?`"${t}"`:`${t}`}function debounce(t,r,a){let n,s=null!=a?a():util_now();return(...i)=>{let o=null!=a?a():util_now();return o-s0))return t;{let a=`${t}_${r}`;return oZ.set(a,1),a}}let oQ=new RegExp(/^[A-Za-z0-9][-A-Za-z0-9\._\/]*$/);function isValidTensorName(t){return!!t.match(oQ)}function isInteger(t){return t===parseInt(t.toString(),10)}function arrayProd(t,r,a){null==r&&(r=0),null==a&&(a=t.length);let n=1;for(let s=r;sr&&(r=n)}return r}function math_utils_range(t,r){if(r{if(2!==t.shape.length)throw new errors_ValueError(`repeat() expects a rank-2 tensor, but received a rank-${t.shape.length} tensor.`);return tfjs_backend_tile(tfjs_backend_expandDims(t,1),[1,r,1])})}function tfjs_backend_flatten(t){let r=[arrayProd(t.shape)];return a6(t,r)}function batchFlatten(t){if(t.rank<=1)throw new errors_ValueError(`batchFlatten requires a minimum rank of 2. 
Got rank: ${t.rank}.`);let r=[t.shape[0],arrayProd(t.shape,1)];return a6(t,r)}function sliceAlongFirstAxis(t,r,a){return globals_tidy(()=>{switch(t.rank){case 1:return ir(t,r,a);case 2:return ia(t,[r,0],[a,t.shape[1]]);case 3:return is(t,[r,0,0],[a,t.shape[1],t.shape[2]]);case 4:return ii(t,[r,0,0,0],[a,t.shape[1],t.shape[2],t.shape[3]]);case 5:return nt(t,[r,0,0,0,0],[a,t.shape[1],t.shape[2],t.shape[3],t.shape[4]]);case 6:return nt(t,[r,0,0,0,0,0],[a,t.shape[1],t.shape[2],t.shape[3],t.shape[4],t.shape[5]]);default:throw new errors_ValueError(`sliceAlongFirstAxis() received an unsupported tensor rank: ${t.rank}`)}})}function sliceAlongLastAxis(t,r,a){return globals_tidy(()=>{switch(t.rank){case 1:return ir(t,r,a);case 2:return ia(t,[0,r],[t.shape[0],a]);case 3:return is(t,[0,0,r],[t.shape[0],t.shape[1],a]);case 4:return ii(t,[0,0,0,r],[t.shape[0],t.shape[1],t.shape[2],a]);default:throw new errors_ValueError(`sliceAlongLastAxis() received an unsupported tensor rank: ${t.rank}`)}})}function sliceAlongAxis(t,r,a,n){return globals_tidy(()=>{switch(t.rank){case 1:return ir(t,r,a);case 2:switch(n){case 1:return sliceAlongFirstAxis(t,r,a);case 2:return sliceAlongLastAxis(t,r,a);default:throw new errors_ValueError(`The axis is not within the rank of the tensor ${n}`)}case 3:switch(n){case 1:return sliceAlongFirstAxis(t,r,a);case 2:return is(t,[0,r,0],[t.shape[0],a,t.shape[2]]);case 3:return sliceAlongLastAxis(t,r,a);default:throw new errors_ValueError(`The axis is not within the rank of the tensor ${n}`)}case 4:switch(n){case 1:return sliceAlongFirstAxis(t,r,a);case 2:return ii(t,[0,r,0,0],[t.shape[0],a,t.shape[2],t.shape[3]]);case 3:return ii(t,[0,0,r,0],[t.shape[0],t.shape[1],a,t.shape[3]]);case 4:return sliceAlongLastAxis(t,r,a);default:throw new errors_ValueError(`The axis is not within the rank of the tensor ${n}`)}default:throw new errors_ValueError(`sliceAlongLastAxis() received an unsupported tensor rank: ${t.rank}`)}})}function concatenate(t,r=-1){let a;return 
r<0&&(r=0!==(a=t[0].rank)?a:0),r===t[0].rank&&(r=-1),a7(t,r)}function concatAlongFirstAxis(t,r){switch(t.rank){case 1:return ng([t,r]);case 2:return ny([t,r],0);case 3:return nx([t,r],0);case 4:return nv([t,r],0);default:throw new errors_ValueError(`concatAlongFirstAxis() received an unsupported tensor rank: ${t.rank}`)}}function tfjs_backend_tile(t,r){if(Array.isArray(r)||(r=[r]),t.rank!==r.length)throw new errors_ValueError(`The length of input n (${r.length}) does not match the number of dimensions in input x (${t.rank})`);return nQ(t,r)}function tfjs_backend_randomNormal(t,r=0,a=1,n,s){return sK(t,r,a,n,s)}function tfjs_backend_dot(t,r,a,n){if(t.rank<2||r.rank<2)throw new errors_NotImplementedError(`dot requires both inputs to be rank >= 2 but got x shape = ${t.shape} and y shape = ${r.shape}`);if(r.rank>=3&&t.shape.slice(-1)[0]!==r.shape.slice(-2)[0])throw new errors_NotImplementedError(`If rank y >= 3, then the second last dim of y must equal the last dim of x but got x shape = ${t.shape} and y shape = ${r.shape}`);if(2===t.rank&&2===r.rank)return iM({a:t,b:r,transposeA:!1,transposeB:!1,bias:n?reshapeBias(t.rank,n,imageDataFormat()):null,activation:a});{let s=t.shape.slice();t=a6(t,[-1,s.pop()]);let i=r.shape.slice(),o=i.pop(),l=i.pop(),u=[...i,o],p=Array.from({length:r.rank},(t,a)=>0===a?r.rank-2:a<=r.rank-2?a-1:a);r=a6(iI(r,p),[l,-1]);let m=[...s,...u];return a6(iM({a:t,b:r,transposeA:!1,transposeB:!1,bias:n?reshapeBias(t.rank,n,imageDataFormat()):null,activation:a}),m)}}function tfjs_backend_gather(t,r,a){return globals_tidy(()=>n2(t,r=Array.isArray(r)?tensor1d(r,"int32"):aE(r,"int32"),a))}function reshapeBias(t,r,a){let n=r.shape;if(1!==r.rank&&r.rank!==t)throw new errors_ValueError(`Unexpected bias dimensions: ${r.rank}; expected it to be 1 or ${t}`);if(5===t){if("channelsFirst"===a)if(1===n.length)return a6(r,[1,n[0],1,1,1]);else return a6(r,[1,n[3],n[0],n[1],n[2]]);else if("channelsLast"===a)if(1===n.length)return a6(r,[1,1,1,1,n[0]]);else return 
a6(r,[1].concat(n))}else if(4===t){if("channelsFirst"===a)if(1===n.length)return a6(r,[1,n[0],1,1]);else return a6(r,[1,n[2],n[0],n[1]]);else if("channelsLast"===a)if(1===n.length)return a6(r,[1,1,1,n[0]]);else return a6(r,[1].concat(n))}else if(3===t){if("channelsFirst"===a)if(1===n.length)return a6(r,[1,n[0],1]);else return a6(r,[1,n[1],n[0]]);else if("channelsLast"===a)if(1===n.length)return a6(r,[1,1,n[0]]);else return a6(r,[1].concat(n))}else if(t<3)return r;throw new errors_ValueError(`Unsupported input rank by biasAdd: ${r.rank}`)}function biasAdd(t,r,a){return globals_tidy(()=>(null==a&&(a=imageDataFormat()),common_checkDataFormat(a),a$(t,reshapeBias(t.rank,r,a))))}function tfjs_backend_elu(t,r=1){if(1!==r)throw new errors_NotImplementedError(`Support for alpha values other than 1 (${r}) is not implemented yet.`);return nW(t)}function softsign(t){return globals_tidy(()=>aF(t,a$(aW(t),1)))}function hardSigmoid(t){return globals_tidy(()=>nf(a$(.5,aD(.2,t)),0,1))}function inTrainPhase(t,r,a=!1){return a?t():r()}let o0=["fanIn","fanOut","fanAvg"],o1=["normal","uniform","truncatedNormal"];function checkFanMode(t){checkStringTypeUnionValue(o0,"FanMode",t)}function checkDistribution(t){checkStringTypeUnionValue(o1,"Distribution",t)}let Initializer=class Initializer extends Serializable{fromConfigUsesCustomObjects(){return!1}getConfig(){return{}}};let initializers_Zeros=class initializers_Zeros extends Initializer{apply(t,r){return zeros(t,r)}};initializers_Zeros.className="Zeros",registerClass(initializers_Zeros);let initializers_Ones=class initializers_Ones extends Initializer{apply(t,r){return ones_ones(t,r)}};initializers_Ones.className="Ones",registerClass(initializers_Ones);let initializers_Constant=class initializers_Constant extends Initializer{constructor(t){if(super(),"object"!=typeof t)throw new errors_ValueError(`Expected argument of type ConstantConfig but got ${t}`);if(void 0===t.value)throw new errors_ValueError(`config must have value set but got 
${t}`);this.value=t.value}apply(t,r){return globals_tidy(()=>aD(scalar_scalar(this.value),ones_ones(t,r)))}getConfig(){return{value:this.value}}};initializers_Constant.className="Constant",registerClass(initializers_Constant);let initializers_RandomUniform=class initializers_RandomUniform extends Initializer{constructor(t){super(),this.DEFAULT_MINVAL=-.05,this.DEFAULT_MAXVAL=.05,this.minval=t.minval||this.DEFAULT_MINVAL,this.maxval=t.maxval||this.DEFAULT_MAXVAL,this.seed=t.seed}apply(t,r){return sq(t,this.minval,this.maxval,r,this.seed)}getConfig(){return{minval:this.minval,maxval:this.maxval,seed:this.seed}}};initializers_RandomUniform.className="RandomUniform",registerClass(initializers_RandomUniform);let initializers_RandomNormal=class initializers_RandomNormal extends Initializer{constructor(t){super(),this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=t.mean||this.DEFAULT_MEAN,this.stddev=t.stddev||this.DEFAULT_STDDEV,this.seed=t.seed}apply(t,r){if("float32"!==(r=r||"float32")&&"int32"!==r)throw new errors_NotImplementedError(`randomNormal does not support dType ${r}.`);return tfjs_backend_randomNormal(t,this.mean,this.stddev,r,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}};initializers_RandomNormal.className="RandomNormal",registerClass(initializers_RandomNormal);let initializers_TruncatedNormal=class initializers_TruncatedNormal extends Initializer{constructor(t){super(),this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=t.mean||this.DEFAULT_MEAN,this.stddev=t.stddev||this.DEFAULT_STDDEV,this.seed=t.seed}apply(t,r){if("float32"!==(r=r||"float32")&&"int32"!==r)throw new errors_NotImplementedError(`truncatedNormal does not support dType ${r}.`);return iT(t,this.mean,this.stddev,r,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}};initializers_TruncatedNormal.className="TruncatedNormal",registerClass(initializers_TruncatedNormal);let initializers_Identity=class initializers_Identity extends 
Initializer{constructor(t){super(),this.gain=null!=t.gain?t.gain:1}apply(t,r){return globals_tidy(()=>{if(2===t.length&&t[0]===t[1])return aD(this.gain,n0(t[0]));throw new errors_ValueError("Identity matrix initializer can only be used for 2D square matrices.")})}getConfig(){return{gain:this.gain}}};function computeFans(t,r="channelsLast"){let a,n;if(common_checkDataFormat(r),2===t.length)a=t[0],n=t[1];else if(-1!==[3,4,5].indexOf(t.length)){if("channelsFirst"===r){let r=arrayProd(t,2);a=t[1]*r,n=t[0]*r}else if("channelsLast"===r){let r=arrayProd(t,0,t.length-2);a=t[t.length-2]*r,n=t[t.length-1]*r}}else{let r=arrayProd(t);a=Math.sqrt(r),n=Math.sqrt(r)}return[a,n]}initializers_Identity.className="Identity",registerClass(initializers_Identity);let initializers_VarianceScaling=class initializers_VarianceScaling extends Initializer{constructor(t){if(super(),t.scale<0)throw new errors_ValueError(`scale must be a positive float. Got: ${t.scale}`);this.scale=null==t.scale?1:t.scale,this.mode=null==t.mode?"fanIn":t.mode,checkFanMode(this.mode),this.distribution=null==t.distribution?"normal":t.distribution,checkDistribution(this.distribution),this.seed=t.seed}apply(t,r){let a=computeFans(t),n=a[0],s=a[1],i=this.scale;if("fanIn"===this.mode?i/=Math.max(1,n):"fanOut"===this.mode?i/=Math.max(1,s):i/=Math.max(1,(n+s)/2),"normal"===this.distribution){let a=Math.sqrt(i);if("float32"!==(r=r||"float32")&&"int32"!==r)throw new errors_NotImplementedError(`${this.getClassName()} does not support dType ${r}.`);return iT(t,0,a,r,this.seed)}{let a=Math.sqrt(3*i);return sq(t,-a,a,r,this.seed)}}getConfig(){return{scale:this.scale,mode:this.mode,distribution:this.distribution,seed:this.seed}}};initializers_VarianceScaling.className="VarianceScaling",registerClass(initializers_VarianceScaling);let initializers_GlorotUniform=class initializers_GlorotUniform extends 
initializers_VarianceScaling{constructor(t){super({scale:1,mode:"fanAvg",distribution:"uniform",seed:null==t?null:t.seed})}getClassName(){return initializers_VarianceScaling.className}};initializers_GlorotUniform.className="GlorotUniform",registerClass(initializers_GlorotUniform);let initializers_GlorotNormal=class initializers_GlorotNormal extends initializers_VarianceScaling{constructor(t){super({scale:1,mode:"fanAvg",distribution:"normal",seed:null==t?null:t.seed})}getClassName(){return initializers_VarianceScaling.className}};initializers_GlorotNormal.className="GlorotNormal",registerClass(initializers_GlorotNormal);let initializers_HeNormal=class initializers_HeNormal extends initializers_VarianceScaling{constructor(t){super({scale:2,mode:"fanIn",distribution:"normal",seed:null==t?null:t.seed})}getClassName(){return initializers_VarianceScaling.className}};initializers_HeNormal.className="HeNormal",registerClass(initializers_HeNormal);let initializers_HeUniform=class initializers_HeUniform extends initializers_VarianceScaling{constructor(t){super({scale:2,mode:"fanIn",distribution:"uniform",seed:null==t?null:t.seed})}getClassName(){return initializers_VarianceScaling.className}};initializers_HeUniform.className="HeUniform",registerClass(initializers_HeUniform);let initializers_LeCunNormal=class initializers_LeCunNormal extends initializers_VarianceScaling{constructor(t){super({scale:1,mode:"fanIn",distribution:"normal",seed:null==t?null:t.seed})}getClassName(){return initializers_VarianceScaling.className}};initializers_LeCunNormal.className="LeCunNormal",registerClass(initializers_LeCunNormal);let initializers_LeCunUniform=class initializers_LeCunUniform extends initializers_VarianceScaling{constructor(t){super({scale:1,mode:"fanIn",distribution:"uniform",seed:null==t?null:t.seed})}getClassName(){return initializers_VarianceScaling.className}};initializers_LeCunUniform.className="LeCunUniform",registerClass(initializers_LeCunUniform);let 
initializers_Orthogonal=class initializers_Orthogonal extends Initializer{constructor(t){super(),this.DEFAULT_GAIN=1,this.ELEMENTS_WARN_SLOW=2e3,this.gain=null==t.gain?this.DEFAULT_GAIN:t.gain,this.seed=t.seed}apply(t,r){return globals_tidy(()=>{if(t.length<2)throw new errors_NotImplementedError("Shape must be at least 2D.");if("int32"!==r&&"float32"!==r&&void 0!==r)throw TypeError(`Unsupported data type ${r}.`);let a=sizeFromShape(t.slice(0,-1)),n=t[t.length-1],s=a*n;s>this.ELEMENTS_WARN_SLOW&&console.warn(`Orthogonal initializer is being called on a matrix with more than ${this.ELEMENTS_WARN_SLOW} (${s}) elements: Slowness may result.`);let i=tfjs_backend_randomNormal([Math.max(n,a),Math.min(n,a)],0,1,r,this.seed),o=of.qr(i,!1),l=o[0];return l=aD(l,o[1].flatten().stridedSlice([0],[Math.min(n,a)*Math.min(n,a)],[Math.min(n,a)+1]).sign()),at*r);return r}let o3="Variable";let LayerVariable=class LayerVariable{constructor(t,r="float32",a=o3,n=!0,s=null){this.dtype=null==r?"float32":r,this.shape=t.shape,this.id=oG++,a=null==a?o3:a,this.originalName=getScopedTensorName(a),this.name=getUniqueTensorName(this.originalName),this.trainable_=n,this.constraint=s,this.val=variable(t,this.trainable_,this.name,this.dtype)}read(){return this.assertNotDisposed(),this.val}write(t){return this.assertNotDisposed(),checkShapesMatch(this.val,t),this.val.id!==t.id&&(this.val.assign(t),null!=this.constraint&&this.val.assign(this.constraint.apply(this.val))),this}dispose(){this.assertNotDisposed(),this.val.dispose()}assertNotDisposed(){if(this.val.isDisposed)throw Error(`LayersVariable ${this.name} is already disposed.`)}get trainable(){return this.trainable_}set trainable(t){this.trainable_=t,this.val.trainable=t}};function checkShapesMatch(t,r){if(t.shape.toString()!==r.shape.toString())throw Error("Shape mismatch: "+JSON.stringify(t.shape)+" vs. 
"+JSON.stringify(r.shape))}function batchGetValue(t){return t.map(t=>t.read())}function batchSetValue(t){t.forEach(t=>{t[0].write(t[1])})}let InputSpec=class InputSpec{constructor(t){this.dtype=t.dtype,this.shape=t.shape,null!=t.shape?this.ndim=t.shape.length:this.ndim=t.ndim,this.maxNDim=t.maxNDim,this.minNDim=t.minNDim,this.axes=t.axes||{}}};let SymbolicTensor=class SymbolicTensor{constructor(t,r,a,n,s,i,o){this.dtype=t,this.shape=r,this.sourceLayer=a,this.inputs=n,this.callArgs=s,this.outputTensorIndex=o,this.id=oG++,null!=i&&(this.originalName=getScopedTensorName(i),this.name=getUniqueTensorName(this.originalName)),this.rank=r.length}};let o4=0;let Node=class Node{constructor(t,r){for(const a of(this.callArgs=r,this.id=o4++,this.outboundLayer=t.outboundLayer,this.inboundLayers=t.inboundLayers,this.nodeIndices=t.nodeIndices,this.tensorIndices=t.tensorIndices,this.inputTensors=t.inputTensors,this.outputTensors=t.outputTensors,this.inputMasks=t.inputMasks,this.outputMasks=t.outputMasks,this.inputShapes=t.inputShapes,this.outputShapes=t.outputShapes,t.inboundLayers))null!=a&&a.outboundNodes.push(this);t.outboundLayer.inboundNodes.push(this)}getConfig(){let t=[];for(let r of this.inboundLayers)null!=r?t.push(r.name):t.push(null);return{outboundLayer:this.outboundLayer?this.outboundLayer.name:null,inboundLayers:t,nodeIndices:this.nodeIndices,tensorIndices:this.tensorIndices}}};let o6=0;let Layer=class Layer extends Serializable{constructor(t={}){super(),this._callHook=null,this._addedWeightNames=[],this._stateful=!1,this.id=o6++,this.activityRegularizer=null,this.inputSpec=null,this.supportsMasking=!1,this._trainableWeights=[],this._nonTrainableWeights=[],this._losses=[],this._updates=[],this._built=!1,this.inboundNodes=[],this.outboundNodes=[];let r=t.name;if(!r){const t=this.getClassName();r=toSnakeCase(t)+"_"+getUid(t)}if(this.name=r,this.trainable_=null==t.trainable||t.trainable,null!=t.inputShape||null!=t.batchInputShape){let 
r;if(null!=t.batchInputShape)r=t.batchInputShape;else if(null!=t.inputShape){let a=null;null!=t.batchSize&&(a=t.batchSize),r=[a].concat(t.inputShape)}this.batchInputShape=r;let a=t.dtype;null==a&&(a=t.inputDType),null==a&&(a="float32"),this.dtype=a}null!=t.weights?this.initialWeights=t.weights:this.initialWeights=null,this._refCount=null,this.fastWeightInitDuringBuild=!1}static nodeKey(t,r){return t.name+"_ib-"+r.toString()}getNodeAtIndex(t,r){if(0===this.inboundNodes.length)throw new RuntimeError(`The layer has never been called and thus has no defined ${r}.`);if(this.inboundNodes.length<=t)throw new errors_ValueError(`Asked to get ${r} at node ${t}, but the layer has only ${this.inboundNodes.length} inbound nodes.`);return this.inboundNodes[t]}getInputAt(t){return singletonOrArray(this.getNodeAtIndex(t,"input").inputTensors)}getOutputAt(t){return singletonOrArray(this.getNodeAtIndex(t,"output").outputTensors)}get input(){if(this.inboundNodes.length>1)throw new AttributeError(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer input" is ill-defined. Use \`getInputAt(nodeIndex)\` instead.`);if(0===this.inboundNodes.length)throw new AttributeError(`Layer ${this.name} is not connected, no input to return.`);return singletonOrArray(this.getNodeAtIndex(0,"input").inputTensors)}get output(){if(0===this.inboundNodes.length)throw new AttributeError(`Layer ${this.name} has no inbound nodes.`);if(this.inboundNodes.length>1)throw new AttributeError(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer output" is ill-defined. 
Use \`getOutputAt(nodeIndex)\` instead.`);return singletonOrArray(this.getNodeAtIndex(0,"output").outputTensors)}get losses(){return this._losses}calculateLosses(){return this.losses.map(t=>t())}get updates(){return this._updates}get built(){return this._built}set built(t){this._built=t}get trainable(){return this.trainable_}set trainable(t){this._trainableWeights.forEach(r=>r.trainable=t),this.trainable_=t}get trainableWeights(){return this.trainable_?this._trainableWeights.filter(t=>t.trainable):[]}set trainableWeights(t){this._trainableWeights=t}get nonTrainableWeights(){return this.trainable?this._trainableWeights.filter(t=>!t.trainable).concat(this._nonTrainableWeights):this._trainableWeights.concat(this._nonTrainableWeights)}set nonTrainableWeights(t){this._nonTrainableWeights=t}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}get stateful(){return this._stateful}resetStates(){if(!this.stateful)throw Error("Cannot call the resetStates() method of a non-stateful Layer object.")}assertInputCompatibility(t){let r=toList(t);if(null==this.inputSpec||0===this.inputSpec.length)return;let a=toList(this.inputSpec);if(r.length!==a.length)throw new errors_ValueError(`Layer ${this.name} expects ${a.length} inputs, but it received ${r.length} input tensors. 
Input received: ${t}`);for(let t=0;ts.maxNDim)throw new errors_ValueError(`Input ${t} is incompatible with layer ${this.name}: expected max_ndim=${s.maxNDim}, found ndim=${i}`);if(null!=s.minNDim&&i=0?r[n]:r[r.length+n];if(null!=i&&-1===[i,null].indexOf(o))throw new errors_ValueError(`Input ${t} is incompatible with layer ${this.name}: expected axis ${n} of input shape to have value ${i} but got shape ${r}.`)}}if(null!=s.shape)for(let r=0;r{if(!this.built){this.assertInputCompatibility(t);let r=[];for(let a of toList(t))r.push(a.shape);this.build(singletonOrArray(r)),this.built=!0,this.initialWeights&&this.setWeights(this.initialWeights),null===this._refCount&&s&&(this._refCount=1)}if(this.assertInputCompatibility(t),s){let n=this.call(t,r);this.supportsMasking&&this.setMaskMetadata(t,n);let s=toList(n),i=[];for(let t of s)-1!==a.indexOf(t)&&(t=t.clone()),i.push(t);if(n=singletonOrArray(i),null!=this.activityRegularizer)throw new errors_NotImplementedError("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return n}{let a,n=collectInputShape(t),s=this.computeOutputShape(n),i=guessOutputDType(t);if(this.warnOnIncompatibleInputShape(Array.isArray(t)?n[0]:n),a=null!=s&&s.length>0&&Array.isArray(s[0])?s.map((a,n)=>new SymbolicTensor(i,a,this,toList(t),r,this.name,n)):new SymbolicTensor(i,s,this,toList(t),r,this.name),this.addInboundNode(t,a,null,null,n,s,r),this._refCount++,null!=this.activityRegularizer)throw new errors_NotImplementedError("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return a}})}warnOnIncompatibleInputShape(t){if(null!=this.batchInputShape)if(t.length!==this.batchInputShape.length)console.warn(`The rank of the input tensor provided (shape: ${JSON.stringify(t)}) does not match that of the batchInputShape (${JSON.stringify(this.batchInputShape)}) of the layer ${this.name}`);else{let 
r=!1;this.batchInputShape.forEach((a,n)=>{null!=a&&null!=t[n]&&t[n]!==a&&(r=!0)}),r&&console.warn(`The shape of the input tensor (${JSON.stringify(t)}) does not match the expectation of layer ${this.name}: ${JSON.stringify(this.batchInputShape)}`)}}get outputShape(){if(null==this.inboundNodes||0===this.inboundNodes.length)throw new AttributeError(`The layer ${this.name} has never been called and thus has no defined output shape.`);let t=[];for(let r of this.inboundNodes){let a=JSON.stringify(r.outputShapes);-1===t.indexOf(a)&&t.push(a)}if(1===t.length){let t=this.inboundNodes[0].outputShapes;return Array.isArray(t)&&Array.isArray(t[0])&&1===t.length?t[0]:t}throw new AttributeError(`The layer ${this.name} has multiple inbound nodes with different output shapes. Hence the notion of "output shape" is ill-defined for the layer.`)}countParams(){if(!this.built)throw new RuntimeError(`You tried to call countParams() on ${this.name}, but the layer is not built yet. Build it first by calling build(batchInputShape).`);return countParamsInWeights(this.weights)}build(t){this.built=!0}getWeights(t=!1){return batchGetValue(t?this.trainableWeights:this.weights)}setWeights(t){globals_tidy(()=>{let r=this.weights;if(r.length!==t.length)throw new errors_ValueError(`You called setWeights(weights) on layer "${this.name}" with a weight list of length ${t.length}, but the layer was expecting ${r.length} weights. 
Provided weights: ${t}...`);if(0===r.length)return;let a=[],n=batchGetValue(r);for(let s=0;ss.apply(p.read())),null==i&&(i=!0),i?this._trainableWeights.push(p):this._nonTrainableWeights.push(p),p}setFastWeightInitDuringBuild(t){this.fastWeightInitDuringBuild=t}addLoss(t){null==t||Array.isArray(t)&&0===t.length||(t=toList(t),void 0!==this._losses&&null!==this._losses&&this.losses.push(...t))}computeOutputShape(t){return t}computeMask(t,r){if(!this.supportsMasking){if(null!=r)if(Array.isArray(r))r.forEach(t=>{if(null!=t)throw TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`)});else throw TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`);return null}return r}setMaskMetadata(t,r,a){if(!this.supportsMasking)return;let n=this.computeMask(t,a),s=toList(r),i=toList(n);if(s.length!==i.length)throw Error(`${this.name} outputs ${s.length} tensors but ${s.length} masks for those tensors`);for(let t=0;tt.dispose()),this.weights.length}assertNotDisposed(){if(0===this._refCount)throw Error(`Layer '${this.name}' is already disposed.`)}dispose(){if(!this.built)throw Error(`Cannot dispose Layer ${this.name} because it has not been built yet.`);if(null===this._refCount)throw Error(`Cannot dispose Layer ${this.name} because it has not been used yet.`);this.assertNotDisposed();let t=0;return 0==--this._refCount&&(t=this.disposeWeights()),{refCountAfterDispose:this._refCount,numDisposedVariables:t}}};function collectInputShape(t){t=toList(t);let r=[];for(let a of t)r.push(a.shape);return singletonOrArray(r)}function guessOutputDType(t){return"float32"}function getSourceInputs(t,r,a){if((null==r||null!=a&&a>0)&&(r=t.sourceLayer,a=t.nodeIndex),0===r.inboundNodes.length)return[t];{let t=r.inboundNodes[a];if(0===t.inboundLayers.length)return t.inputTensors;{let r=[];for(let a=0;at.name),p=[],m=r.names();for(let t of 
u)-1!==m.indexOf(t)?p.push(r.getValue(t)):p.push(null);null!=n&&(n.maxNumTensors=-1/0,n.minNumTensors=1/0);let y=u.join(",")+"|"+r.names().sort().join(","),_=o5.get(y);if(null==_){let t=getTopologicalSortAndRecipientCounts(l,r);_=t.sorted,s=t.recipientCounts,o5.put(y,_),o8.put(y,s)}s={},i||Object.assign(s,o8.get(y));let w=new FeedDict(r);for(let t=0;t<_.length;++t){if(null!=n){let t=memory().numTensors;t>n.maxNumTensors&&(n.maxNumTensors=t),t0,()=>"Expected at least one fetch, got none");let a=[],n={};if(1===t.length){let s=getTopologicalSortAndRecipientCountsForOneFetch(t[0],r);a=s.sorted,n=s.recipientMap}else{let s=new Set;for(let i of t){let{sorted:t,recipientMap:o}=getTopologicalSortAndRecipientCountsForOneFetch(i,r);for(let r of t)s.has(r.name)||(a.push(r),s.add(r.name));for(let t in o)null==n[t]&&(n[t]=new Set),o[t].forEach(r=>n[t].add(r))}}return{sorted:a,recipientCounts:recipientMap2Counts(n)}}function recipientMap2Counts(t){let r={};for(let a in t)r[a]=t[a].size;return r}function getTopologicalSortAndRecipientCountsForOneFetch(t,r){let a=new Set,n=[],s={};for(let t of r.names())a.add(t);let i=[],o=[];for(i.push(t);i.length>0;){let t=i[i.length-1];if(a.has(t.name)){i.pop();continue}let r=o[o.length-1]===i.length-1;if(0===t.inputs.length||r)i.pop(),n.push(t),a.add(t.name),r&&o.pop();else for(let r of(o.push(i.length-1),t.inputs))null==s[r.name]&&(s[r.name]=new Set),s[r.name].add(t.name),a.has(r.name)||i.push(r)}return{sorted:n,recipientMap:s}}function getNodeOutputs(t){let r;if(1===t.sourceLayer.inboundNodes.length)r=t.sourceLayer.output;else{let a=null;for(let r=0;raP(nH(aD(t,t),r,!0)))}eV.registerFlag("TOPOLOGICAL_SORT_CACHE_MAX_ENTRIES",()=>100,updateCacheMaxEntries);let Constraint=class Constraint extends Serializable{getConfig(){return{}}};let constraints_MaxNorm=class constraints_MaxNorm extends 
Constraint{constructor(t){super(),this.defaultMaxValue=2,this.defaultAxis=0,this.maxValue=null!=t.maxValue?t.maxValue:this.defaultMaxValue,this.axis=null!=t.axis?t.axis:this.defaultAxis}apply(t){return globals_tidy(()=>{let r=calcL2Norms(t,this.axis);return aD(t,aF(nf(r,0,this.maxValue),a$(epsilon(),r)))})}getConfig(){return{maxValue:this.maxValue,axis:this.axis}}};constraints_MaxNorm.className="MaxNorm",registerClass(constraints_MaxNorm);let constraints_UnitNorm=class constraints_UnitNorm extends Constraint{constructor(t){super(),this.defaultAxis=0,this.axis=null!=t.axis?t.axis:this.defaultAxis}apply(t){return globals_tidy(()=>aF(t,a$(epsilon(),calcL2Norms(t,this.axis))))}getConfig(){return{axis:this.axis}}};constraints_UnitNorm.className="UnitNorm",registerClass(constraints_UnitNorm);let constraints_NonNeg=class constraints_NonNeg extends Constraint{apply(t){return sJ(t)}};constraints_NonNeg.className="NonNeg",registerClass(constraints_NonNeg);let constraints_MinMaxNorm=class constraints_MinMaxNorm extends Constraint{constructor(t){super(),this.defaultMinValue=0,this.defaultMaxValue=1,this.defaultRate=1,this.defaultAxis=0,this.minValue=null!=t.minValue?t.minValue:this.defaultMinValue,this.maxValue=null!=t.maxValue?t.maxValue:this.defaultMaxValue,this.rate=null!=t.rate?t.rate:this.defaultRate,this.axis=null!=t.axis?t.axis:this.defaultAxis}apply(t){return globals_tidy(()=>{let r=calcL2Norms(t,this.axis),a=a$(aD(this.rate,nf(r,this.minValue,this.maxValue)),aD(1-this.rate,r));return aD(t,aF(a,a$(epsilon(),r)))})}getConfig(){return{minValue:this.minValue,maxValue:this.maxValue,rate:this.rate,axis:this.axis}}};constraints_MinMaxNorm.className="MinMaxNorm",registerClass(constraints_MinMaxNorm);let o7={maxNorm:"MaxNorm",minMaxNorm:"MinMaxNorm",nonNeg:"NonNeg",unitNorm:"UnitNorm"};function deserializeConstraint(t,r={}){return deserializeKerasObject(t,SerializationMap.getMap().classNameMap,r,"constraint")}function getConstraint(t){return null==t?null:"string"==typeof 
t?deserializeConstraint({className:t in o7?o7[t]:t,config:{}}):t instanceof Constraint?t:deserializeConstraint(t)}async function resolveScalarsInLogs(t){if(null==t)return;let r=[],a=[],n=[];for(let s in t){let i=t[s];"number"!=typeof i&&(r.push(i.data()),a.push(s),n.push(i))}if(r.length>0){let s=await Promise.all(r);for(let r=0;ra$(this.totals[t],aD(n,a)));this.totals[t]=s,null!=r&&r.dispose()}}}async onEpochEnd(t,r){if(null!=r)for(let t of this.params.metrics)null!=this.totals[t]&&("number"==typeof this.totals[t]?r[t]=this.totals[t]/this.seen:globals_tidy(()=>{let a=aD(aF(1,this.seen),this.totals[t]);r[t]=a,this.totals[t].dispose(),keep(r[t])}))}};let History=class History extends BaseCallback{async onTrainBegin(t){this.epoch=[],this.history={}}async onEpochEnd(t,r){for(let a in null==r&&(r={}),this.epoch.push(t),r)null==this.history[a]&&(this.history[a]=[]),this.history[a].push(r[a])}async syncData(){let t=[],r=[],a=[];for(let n in this.history){let s=this.history[n];for(let i=0;inew CustomCallback(t,r))}let base_callbacks_CallbackConstructorRegistry=class base_callbacks_CallbackConstructorRegistry{static registerCallbackConstructor(t,r){assert(t>=0&&Number.isInteger(t),()=>`Verbosity level is expected to be an integer >= 0, but got ${t}`),base_callbacks_CallbackConstructorRegistry.checkForDuplicate(r),null==base_callbacks_CallbackConstructorRegistry.constructors[t]&&(base_callbacks_CallbackConstructorRegistry.constructors[t]=[]),base_callbacks_CallbackConstructorRegistry.constructors[t].push(r)}static checkForDuplicate(t){for(let r in base_callbacks_CallbackConstructorRegistry.constructors)base_callbacks_CallbackConstructorRegistry.constructors[+r].forEach(r=>{if(r===t)throw new errors_ValueError("Duplicate callback constructor.")})}static clear(){base_callbacks_CallbackConstructorRegistry.constructors={}}static createCallbacks(t){let r=[];for(let a in base_callbacks_CallbackConstructorRegistry.constructors){let 
n=+a;t>=n&&r.push(...base_callbacks_CallbackConstructorRegistry.constructors[n])}return r.map(t=>new t)}};function configureCallbacks(t,r,a,n,s,i,o,l,u){let p=new History,m=[new BaseLogger,...base_callbacks_CallbackConstructorRegistry.createCallbacks(r)];null!=t&&m.push(...t),m.push(p);let y=new CallbackList(m);return y.setParams({epochs:a,initialEpoch:n,samples:s,steps:i,batchSize:o,verbose:r,doValidation:l,metrics:u}),{callbackList:y,history:p}}function serialization_deserialize(t,r={},a=!1){return deserializeKerasObject(t,SerializationMap.getMap().classNameMap,r,"layer",a)}function l2Normalize(t,r){return globals_tidy(()=>{var a;"float32"!==t.dtype&&(t=aE(t,"float32"));let n=nH(aD(a=t,a),r,!0),s=fill(n.shape,epsilon()),i=aP(aU(n,s));return aF(t,i)})}function losses_meanSquaredError(t,r){return globals_tidy(()=>{var a;return sv(aD(a=aB(r,t),a),-1)})}function meanAbsoluteError(t,r){return globals_tidy(()=>sv(aW(aB(r,t)),-1))}function meanAbsolutePercentageError(t,r){return globals_tidy(()=>{let a=aB(t,r),n=nf(aW(t),epsilon(),Number.MAX_VALUE);return aD(100,sv(aW(aF(a,n)),-1))})}function categoricalCrossentropy(t,r,a=!1){return globals_tidy(()=>{if(a)r=io(r);else{let t=nH(r,r.shape.length-1,!0);r=aF(r,t)}return r=nf(r,epsilon(),1-epsilon()),si(nH(aD(aE(t,"float32"),sn(r)),r.shape.length-1))})}function sparseCategoricalCrossentropy(t,r,a=!1){return globals_tidy(()=>{let n=aE(n1(tfjs_backend_flatten(t)),"int32"),s=(r=nf(r,epsilon(),1-epsilon())).shape;return categoricalCrossentropy(a6(sC(n,s[s.length-1]),s),r,a)})}function sigmoidCrossEntropyWithLogits(t,r){if(!arraysEqual(t.shape,r.shape))throw new errors_ValueError(`logits and labels must have the same shape, but got shapes ${JSON.stringify(t.shape)} and ${JSON.stringify(r.shape)}`);return globals_tidy(()=>{let a=sJ(r),n=si(aW(r));return a$(aB(a,aD(r,t)),ss(nY(n)))})}function binaryCrossentropy(t,r){return globals_tidy(()=>{let a;return 
sv(sigmoidCrossEntropyWithLogits(t,a=sn(aF(a=nf(r,epsilon(),1-epsilon()),aB(1,a)))),-1)})}function cosineProximity(t,r){return globals_tidy(()=>si(nH(aD(l2Normalize(t,-1),l2Normalize(r,-1)),-1)))}base_callbacks_CallbackConstructorRegistry.constructors={};let o9={meanSquaredError:losses_meanSquaredError,meanAbsoluteError,meanAbsolutePercentageError,meanSquaredLogarithmicError:function(t,r){return globals_tidy(()=>{var a;return sv(aD(a=aB(sn(a$(1,nf(r,epsilon(),Number.MAX_VALUE))),sn(a$(1,nf(t,epsilon(),Number.MAX_VALUE)))),a),-1)})},squaredHinge:function(t,r){return globals_tidy(()=>{let a=aU(0,aB(1,aD(t,r)));return sv(aD(a,a),-1)})},hinge:function(t,r){return globals_tidy(()=>sv(aU(0,aB(1,aD(t,r))),-1))},categoricalHinge:function(t,r){return globals_tidy(()=>{let a=nH(aD(t,r),-1),n=nj(aD(aB(1,t),r),-1);return aU(0,a$(1,aB(n,a)))})},logcosh:function(t,r){return globals_tidy(()=>{let a=Math.log(2),n=aB(r,t);return sv(aB(a$(n,so(aD(-2,n))),a),-1)})},categoricalCrossentropy,sparseCategoricalCrossentropy,binaryCrossentropy,kullbackLeiblerDivergence:function(t,r){return globals_tidy(()=>{let a=nf(t,epsilon(),1);return nH(aD(t,sn(aF(a,nf(r,epsilon(),1)))),-1)})},poisson:function(t,r){return globals_tidy(()=>{let a=sn(a$(epsilon(),r));return sv(aB(r,aD(t,a)),-1)})},cosineProximity};function get(t){if("string"!=typeof t)return t;{if(t in o9)return o9[t];let r=`Unknown loss ${t}`;throw t.toLowerCase().includes("softmaxcrossentropy")&&(r=`Unknown loss ${t}. 
Use "categoricalCrossentropy" as the string name for tf.losses.softmaxCrossEntropy`),new errors_ValueError(r)}}function binaryAccuracy(t,r){return globals_tidy(()=>{let a=aD(.5,sE(r)),n=aE(n3(r,a),t.dtype);return sv(nM(t,n),-1)})}function categoricalAccuracy(t,r){return globals_tidy(()=>aE(nM(aJ(t,-1),aJ(r,-1)),"float32"))}function truePositives(t,r){return globals_tidy(()=>aE(nH(sh(nM(t,1),nM(r,1))),"float32"))}function falsePositives(t,r){return globals_tidy(()=>aE(nH(sh(nM(t,0),nM(r,1))),"float32"))}function metrics_binaryCrossentropy(t,r){return binaryCrossentropy(t,r)}function sparseCategoricalAccuracy(t,r){return t.rank===r.rank&&(t=im(t,[t.rank-1])),(r=aJ(r,-1)).dtype!==t.dtype&&(r=aE(r,t.dtype)),aE(nM(t,r),"float32")}let le=categoricalCrossentropy,lt=sparseCategoricalCrossentropy,lr={binaryAccuracy,categoricalAccuracy,precision:function(t,r){return globals_tidy(()=>{let a=truePositives(t,r),n=a$(a,falsePositives(t,r));return aE(nL(n3(n,0),aF(a,n),0),"float32")})},categoricalCrossentropy:le,sparseCategoricalCrossentropy:lt,mse:losses_meanSquaredError,MSE:losses_meanSquaredError,mae:meanAbsoluteError,MAE:meanAbsoluteError,mape:meanAbsolutePercentageError,MAPE:meanAbsolutePercentageError,cosine:cosineProximity};function metrics_get(t){if("string"==typeof t&&t in lr)return lr[t];if("string"!=typeof t&&null!=t)return t;throw new errors_ValueError(`Unknown metric ${t}`)}function getLossOrMetricName(t){if(generic_utils_assert(null!==t,`Unknown LossOrMetricFn ${t}`),"string"==typeof t)return t;{let r;for(let a of Object.keys(o9))if(o9[a]===t){r=a;break}if(void 0!==r)return r;for(let a of Object.keys(lr))if(lr[a]===t){r=a;break}return void 0!==r?r:t.name}}function getOptimizer(t){let 
r={Adagrad:()=>OptimizerConstructors.adagrad(.01),Adadelta:()=>OptimizerConstructors.adadelta(1,.95,epsilon()),Adam:()=>OptimizerConstructors.adam(.001,.9,.999,epsilon()),Adamax:()=>OptimizerConstructors.adamax(.002,.9,.999,epsilon(),0),RMSProp:()=>OptimizerConstructors.rmsprop(.001,.9,0,epsilon()),SGD:()=>OptimizerConstructors.sgd(.01)};if(r.adagrad=r.Adagrad,r.adadelta=r.Adadelta,r.adam=r.Adam,r.adamax=r.Adamax,r.rmsprop=r.RMSProp,r.sgd=r.SGD,t in r)return r[t]();throw new errors_ValueError(`Unknown Optimizer ${t}`)}function checkUserDefinedMetadata(t,r,a=!1){if(null==t||"object"!=typeof t||Object.getPrototypeOf(t)!==Object.prototype||!plainObjectCheck(t))throw Error("User-defined metadata is expected to be a JSON object, but is not.");if(a){let a=JSON.stringify(t);a.length>1048576&&console.warn(`User-defined metadata of model "${r}" is too large in size (length=${a.length} when serialized). It is not recommended to store such large objects in user-defined metadata. Please make sure its serialized length is <= 1048576.`)}}function plainObjectCheck(t){if(null===t)return!0;if("object"==typeof t)if(Object.getPrototypeOf(t)===Object.prototype){for(let r of Object.keys(t))if("string"!=typeof r||!plainObjectCheck(t[r]))return!1;return!0}else{if(!Array.isArray(t))return!1;for(let r of t)if(!plainObjectCheck(r))return!1;return!0}{let r=typeof t;return"string"===r||"number"===r||"boolean"===r}}function printSummary(t,r,a,n=console.log){let s,i=isModelSequentialLike(t),o=["Layer (type)","Input Shape","Output shape","Param #"];if(i?(r=r||90,a=a||[.32,.61,.89,1]):(r=r||115,a=a||[.24,.48,.7,.8,1]),a[a.length-1]<=1&&(a=a.map(t=>Math.floor(r*t))),!i)for(let r in o.push("Receives inputs"),s=[],t.nodesByDepth)s.push(...t.nodesByDepth[r]);n("_".repeat(r)),printRow(o,a,n),n("=".repeat(r));let l=t.layers;for(let t=0;t1||1===t.length&&t[0].inboundLayers.length>1){r=!1;break}n.push(...t)}if(r)for(let a of t.layers){let t=!1;for(let s of 
a.inboundNodes)if(-1!==n.indexOf(s))if(t){r=!1;break}else t=!0;if(!r)break}return r}function printRow(t,r,a=console.log){let n="";for(let a=0;a0&&(n=n.slice(0,n.length-1)+" "),n+=t[a],n=n.slice(0,r[a]),n+=" ".repeat(r[a]-n.length);a(n)}function printLayerSummary(t,r,a){let n,s;try{s=t.inboundNodes.map(t=>JSON.stringify(t.inputShapes)).join(",")}catch{s="multiple"}try{n=JSON.stringify(t.outputShape)}catch{n="multiple"}let i=t.name,o=t.getClassName();printRow([`${i} (${o})`,s,n,t.countParams().toString()],r,a)}function printLayerSummaryWithConnections(t,r,a,n){let s,i;try{i=t.inboundNodes.map(t=>JSON.stringify(t.inputShapes)).join(",")}catch{i="multiple"}try{s=JSON.stringify(t.outputShape)}catch{s="multiple"}let o=[];for(let r of t.inboundNodes)if(null==a||!(a.length>0)||-1!==a.indexOf(r))for(let t=0;tt.name)}`);for(const t of(generic_utils_unique(this.outputs).length!==this.outputs.length&&console.warn(`The list of outputs passed to the model is redundant. All outputs should only appear once. 
Found: ${this.outputs.map(t=>t.name)}`),this.inputLayers=[],this.inputLayersNodeIndices=[],this.inputLayersTensorIndices=[],this.outputLayers=[],this.outputLayersNodeIndices=[],this.outputLayersTensorIndices=[],this.layers=[],this.internalContainerRefs=[],this.outputs)){const r=t.sourceLayer,a=t.nodeIndex,n=t.tensorIndex;this.outputLayers.push(r),this.outputLayersNodeIndices.push(a),this.outputLayersTensorIndices.push(n)}for(const t of this.inputs){const r=t.sourceLayer,a=t.nodeIndex,n=t.tensorIndex;generic_utils_assert(0===a,"input layer has >1 nodes"),generic_utils_assert(0===n,"input layer has >1 tensors"),this.inputLayers.push(r),this.inputLayersNodeIndices.push(a),this.inputLayersTensorIndices.push(n)}this.inputNames=[],this.outputNames=[],this.feedInputShapes=[],this.feedInputNames=[],this.feedOutputNames=[];for(let r=0;rt.shape),this.internalOutputShapes=this.outputs.map(t=>t.shape);const r={},a={},n={},s={},i={},o=[],buildMapOfGraph=(t,r,a,n,s,l)=>{(null==n||null==s||null==l)&&(n=t.sourceLayer,s=t.nodeIndex,l=t.tensorIndex);let u=n.inboundNodes[s];if(-1!==a.indexOf(u))throw new RuntimeError(`The tensor ${t.name} at layer "${n.name}" is part of a cycle.`);if(-1!==r.indexOf(u))return;this.containerNodes.add(Container.nodeKey(n,s)),n.id in i||(i[n.id]=Object.keys(i).length),-1===a.indexOf(u)&&a.push(u);let p=u.inboundLayers.length;for(let t=0;t=0;)a.splice(a.indexOf(u),1);o.push(u)},l=[],u=[];for(const t of this.outputs)buildMapOfGraph(t,l,u);for(const t of o.slice().reverse()){a[t.id]=t,t.id in r||(r[t.id]=0);let i=r[t.id];i=Math.max(i,null==n[t.outboundLayer.id]?0:n[t.outboundLayer.id]),n[t.outboundLayer.id]=i,s[t.outboundLayer.id]=t.outboundLayer,r[t.id]=i;for(let n=0;nparseInt(t,10)).sort(reverseNumberCompare);for(const t of(this.layers=[],y)){const r=m[t];for(const t of(r.sort((t,r)=>{let a=i[t.id],n=i[r.id];return an)}),r))t instanceof 
Container&&this.internalContainerRefs.push(t),this.layers.push(t)}this.layersByDepth=m,y=Object.keys(p).map(t=>parseInt(t,10)).sort(reverseNumberCompare);const _=this.inputs.slice(),w=[];for(const t of y)for(const r of p[t]){const t=r.outboundLayer;if(null!=t){for(const a of r.inputTensors)if(-1===_.indexOf(a))throw new RuntimeError(`Graph disconnected: cannot obtain value for tensor ${a} at layer "${t.name}". The following previous layers were accessed without issue: ${w}`);for(const t of r.outputTensors)_.push(t);w.push(t.name)}}this.nodesByDepth=p;const I=this.layers.map(t=>t.name);for(const t of I){const r=I.filter(r=>r===t).length;if(1!==r)throw new RuntimeError(`The name "${t}" is used ${r} times in the model. All layer names should be unique. Layer names: `+JSON.stringify(I))}this.outboundNodes=[],this.inboundNodes=[],new Node({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:this.inputs.map(t=>null),outputMasks:this.outputs.map(t=>null),inputShapes:this.inputs.map(t=>t.shape),outputShapes:this.outputs.map(t=>t.shape)}),this.built=!0,this._refCount=1}assertNotDisposed(){if(0===this._refCount)throw Error(`Container '${this.name}' is already disposed.`)}dispose(){this.assertNotDisposed();let t={refCountAfterDispose:null,numDisposedVariables:0};if(0==--this._refCount){for(let r of this.layers)t.numDisposedVariables+=r.dispose().numDisposedVariables;for(let r of this.internalContainerRefs)t.numDisposedVariables+=r.dispose().numDisposedVariables}return t.refCountAfterDispose=this._refCount,t}get trainable(){return this.trainable_}set trainable(t){this.layers.forEach(r=>{r._trainableWeights.forEach(r=>r.trainable=t)}),this.trainable_=t}get trainableWeights(){if(this._trainableWeights.length>0)throw new errors_ValueError("Container instance unexpectedly contains _trainableWeights.The trainable weights of a Container are a union of the trainable weights of its consituent Layers. 
Its own _trainableWeights must remain an empty Array.");if(!this.trainable)return[];let t=[];for(let r of this.layers)t=t.concat(r.trainableWeights);return t}get nonTrainableWeights(){let t=[];for(let r of this.layers)t.push(...r.nonTrainableWeights);if(!this.trainable){let r=[];for(let t of this.layers)r.push(...t.trainableWeights);return r.concat(t)}return t}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}loadWeights(t,r=!0){let a={},n=0,s=(t=>{let r=Object.keys(t);if(0===r.length)return!1;let a=r[0].split("/");return!isNaN(parseInt(a[a.length-1],10))})(t);for(let r of(s&&this.parseWeights(t),this.layers))for(let[t,i]of r.weights.entries()){let r=s?`${i.name.split("/").slice(0,-1).join("/")+"/"}${t}`:i.originalName;if(null!=a[r])throw new errors_ValueError(`Duplicate weight name: ${r}`);a[r]=i,n++}let i=[];for(let n in t){let s=n;if(null==a[n]){let t=n.split("/");s=t.slice(0,-2).concat([t[t.length-1]]).join("/")}if(null!=a[s])i.push([a[s],t[n]]);else if(r)throw new errors_ValueError(`Provided weight data has no target variable: ${n}`);delete a[s]}if(r){let t=[];for(let r in a)t.push(r);if(t.length>0)throw new errors_ValueError(`${t.length} of ${n} weights are not set: ${t}`)}batchSetValue(i)}parseWeights(t){for(let r in Object.keys(t)){let a=r.split("/"),n=["vars","layer_checkpoint_dependencies"],s=a.map(t=>t.startsWith("_")?t.slice(1):t).filter(t=>!n.includes(t)).join("/");s!==r&&(t[s]=t[r],delete t[r])}}updatedConfig(){let t=this.getConfig(),r={};return r.className=this.getClassName(),r.config=t,r.kerasVersion=`tfjs-layers ${la}`,r.backend="TensorFlow.js",r}toJSON(t,r=!0){let a=convertTsToPythonic(this.updatedConfig());return r?JSON.stringify(a):a}call(t,r){return globals_tidy(()=>{t=toList(t);let a=new FeedDict;for(let r=0;r{let a;return t=toList(t),a=null==r?pyListRepeat(null,t.length):toList(r),this.runInternalGraph(t,a)[1]})}computeOutputShape(t){let r=normalizeShapeList(t);if(r.length!==this.inputLayers.length)throw new 
errors_ValueError(`Invalid inputShape argument ${t}: model has ${this.inputLayers.length} tensor inputs.`);let a={};for(let t=0;tparseInt(t,10)).sort(reverseNumberCompare);if(n.length>1)for(let t of n)for(let r of this.nodesByDepth[t]){let t=r.outboundLayer;if(-1!==this.inputLayers.map(t=>t.id).indexOf(t.id))continue;let n=[];for(let t=0;tparseInt(t,10)).sort(reverseNumberCompare))for(let r of this.nodesByDepth[t]){let t=r.outboundLayer,n=r.inputTensors,s=r.outputTensors,i=[];for(let t of n)t.id in a&&i.push(a[t.id]);if(i.length===n.length){let n,o,l,u,p={};if(null!=r.callArgs&&(p=r.callArgs),1===i.length){let[r,a]=i[0];null==p.mask&&(p.mask=a),l=toList(t.call(r,p)),u=toList(t.computeMask(r,a)),n=[r],o=[a]}else n=i.map(t=>t[0]),o=i.map(t=>t[1]),null==p.mask&&(p.mask=o),l=toList(t.call(n,p)),u=toList(t.computeMask(n,o));if(t.activityRegularizer)throw new errors_NotImplementedError("LayersModel invocation with concrete Tensor value(s) in the presence of activity regularizer(s) is not supported yet.");for(let t=0;t{let t=[];for(let r of this.layers)for(let a=0;a0){let t=[];for(let a=0;a0&&t.apply(singletonOrArray(n),a)}function processLayer(t){let a=t.name,i=serialization_deserialize(t,null!=r.customObjects?r.customObjects:{});i.setFastWeightInitDuringBuild(n),s[a]=i,t.inboundNodes.forEach(t=>{if(!(t instanceof Array))throw new errors_ValueError(`Corrupted configuration, expected array for nodeData: ${t}`);addUnprocessedNode(i,t)})}let o=r.name,l=r.layers;for(let t of l)processLayer(t);for(;!isObjectEmpty(i);)for(let t of l){let r=s[t.name];if(r.name in i){let t=i[r.name];for(let a of(delete i[r.name],t))processNode(r,a)}}let u=[],p=[];for(let t of r.inputLayers){let r=t[0],a=t[1],n=t[2];generic_utils_assert(r in s);let i=s[r].inboundNodes[a].outputTensors;u.push(i[n])}for(let t of r.outputLayers){let r=t[0],a=t[1],n=t[2];generic_utils_assert(r in s);let i=s[r].inboundNodes[a].outputTensors;p.push(i[n])}return new t({inputs:u,outputs:p,name:o})}get 
stateful(){if(this._stateful)throw new errors_ValueError("Container instance unexpectedly has _stateful = true. The statefulness of a Container is determined by the Layers it contains. Its _stateful property must remain the default false.");for(let t of this.layers)if(t.stateful)return!0;return!1}resetStates(){globals_tidy(()=>{this.layers.forEach(t=>{t.stateful&&t.resetStates()})})}};function standardizeSampleOrClassWeights(t,r,a){let n=r.length;if(null==t||Array.isArray(t)&&0===t.length)return r.map(t=>null);if(1===n)if(Array.isArray(t)&&1===t.length)return t;else if("object"==typeof t&&r[0]in t)return[t[r[0]]];else return[t];if(Array.isArray(t)){if(t.length!==n)throw Error(`Provided ${a} is an array of ${t.length} element(s), but the model has ${n} outputs. Make sure a set of weights is provided for each model output.`);return t}if("object"==typeof t&&Object.keys(t).length>0&&"object"==typeof t[Object.keys(t)[0]]){let a=[];return r.forEach(r=>{r in t?a.push(t[r]):a.push(null)}),a}throw Error(`The model has multiple (${n}) outputs, so ${a} must be either an array with ${n} elements or an object with ${r} keys. Provided ${a} not understood: ${JSON.stringify(t)}`)}function standardizeClassWeights(t,r){return standardizeSampleOrClassWeights(t,r,"classWeight")}async function standardizeWeights(t,r,a,n){if(null!=r||null!=n)throw Error("Support sampleWeight is not implemented yet");if(null==a)return null;{let r=globals_tidy(()=>{if(1===t.shape.length)return aA(t);if(2===t.shape.length)if(t.shape[1]>1)return aJ(t,1);else if(1===t.shape[1])return a6(t,[t.shape[0]]);else throw Error(`Encountered unexpected last-dimension size (${t.shape[1]}) during handling of class weights. The size is expected to be >= 1.`);throw Error(`Unexpected rank of target (y) tensor (${t.rank}) during handling of class weights. 
The rank is expected to be 1 or 2.`)}),n=Array.from(await r.data());globals_dispose(r);let s=[];return n.forEach(t=>{if(null==a[t])throw Error(`classWeight must contain all classes in the training data. The class ${t} exists in the data but not in classWeight`);s.push(a[t])}),tensor1d(s,"float32")}}function training_utils_computeWeightedLoss(t,r){return aD(t,r)}function standardizeDataIteratorOutput(t,r){let a,n;a=r.xs,n=r.ys,assert(null!=a&&null!=n,()=>`A Dataset iterator for fitDataset() is expected to generate objects of the form \`{xs: xVal, ys: yVal}\`, where the two values may be \`tf.Tensor\`, an array of Tensors, or a map of string to Tensor. The provided Dataset instead generates ${r}`);let s=flattenTensorOrArrayOrMap("input",t.inputNames,a),i=flattenTensorOrArrayOrMap("output",t.outputNames,n),o=s[0].shape[0];assert(s.length===t.inputs.length,()=>`LayersModel has ${t.inputs.length} inputs, but the dataset provides ${s.length} inputs. (Expected input keys: ${JSON.stringify(t.inputNames)})`),assert(i.length===t.outputs.length,()=>`LayersModel has ${t.outputs.length} outputs, but the dataset provides ${i.length} outputs. 
(Expected output keys: ${JSON.stringify(t.outputNames)})`);for(let r=0;r`Batch size mismatch: input ${t.inputNames[r]} has ${s[r].shape[0]}; expected ${o} based on input ${t.inputNames[0]}.`);for(let r=0;r`Batch size mismatch: output ${t.outputNames[r]} has ${i[r].shape[0]}; expected ${o} based on input ${t.inputNames[0]}.`);return{xs:s,ys:i}}function flattenTensorOrArrayOrMap(t,r,a){if(a instanceof tensor_Tensor)return[a];if(Array.isArray(a))return assert(a.length===r.length,()=>`Received an array of ${a.length} Tensors, but expected ${r.length} to match the ${t} keys ${r}.`),a;{let n=[];for(let s of r){if(null==a[s])throw new errors_ValueError(`The feature data generated by the dataset lacks the required ${t} key '${s}'.`);n.push(a[s])}return n}}function standardizeTensorValidationData(t){if(3===t.length)throw new errors_NotImplementedError("Validation with sample weights is not implemented yet.");return{xs:t[0],ys:t[1]}}async function fitDataset(t,r,a){let n=null!=a.batchesPerEpoch;if(assert(null!=t.optimizer,()=>"You must compile a model before training/testing. Use LayersModel.compile(modelCompileConfig)."),assert(null!=a,()=>"For fitDataset(), the 2nd argument (config) is required, but it is not provided in this call."),assert(null!=a.epochs&&a.epochs>0&&Number.isInteger(a.epochs),()=>`For fitDataset(), config.epochs is expected to be a positive integer, but got ${a.epochs}`),assert(!n||a.batchesPerEpoch>0&&Number.isInteger(a.batchesPerEpoch),()=>`For fitDataset(), config.batchesPerEpoch is expected to be a positive integer if specified, but got ${a.batchesPerEpoch}`),assert(null==a.validationSplit,()=>"`validationSplit` is not supported by `fitDataset()`. 
Use validationData instead."),t.isTraining)throw Error("Cannot start training because another fit() call is ongoing.");t.isTraining=!0;try{let s,i,o,l=null!=a.validationData;if(l)if(isDatasetObject(a.validationData))assert(null==a.validationBatches||a.validationBatches>0&&Number.isInteger(a.validationBatches),()=>`For fitDataset() with dataset-based validation, config.validationBatches is expected not to be provided, or to be a positive integer, but got ${a.validationBatches}`);else{let t=standardizeTensorValidationData(a.validationData);s=t.xs,i=t.ys}let u=t.makeTrainFunction(),p=t.getDedupedMetricsNames();o=l?p.slice().concat(p.map(t=>"val_"+t)):p.slice();let m=standardizeCallbacks(a.callbacks,a.yieldEvery),y=null==a.verbose?1:a.verbose,{callbackList:_,history:w}=configureCallbacks(m,y,a.epochs,null,null,getStepsPerEpoch(r,a),null,l,o);_.setModel(t),t.history=w,await _.onTrainBegin(),t.stopTraining_=!1;let I=null==a.initialEpoch?0:a.initialEpoch,C=await r.iterator();for(;I=a.batchesPerEpoch:r.done){if(l){let r;r=isDatasetObject(a.validationData)?toList(await t.evaluateDataset(a.validationData,{batches:a.validationBatches})):toList(t.evaluate(s,i,{batchSize:null==a.validationBatchSize?32:a.validationBatchSize,verbose:0}));for(let a=0;a0)throw new errors_NotImplementedError("Verbose mode is not implemented yet.");assert(!n||a.batches>0&&Number.isInteger(a.batches),()=>`Test loop expects \`batches\` to be a positive integer, but received ${JSON.stringify(a.batches)}`);let o=isLazyIteratorObject(r)?r:await r.iterator(),l=0,u=0;for(;!n||u{if(r.value){let{xs:a,ys:n}=standardizeDataIteratorOutput(t,r.value),o=a.concat(n),p=globals_tidy(()=>s(o));if(globals_dispose(o),0===u)for(let t=0;ta$(i[t],aD(m,r))),u>0&&globals_dispose(a)}globals_dispose(p),l+=m,++u}return i}),r.done){n&&console.warn(`Your dataset iterator ran out of data during evaluateDataset(). Interrupting evalution. 
Make sure that your dataset can generate at least \`batches\` batches (in this case, ${a.batches} batches). You may need to use the repeat() function when building your dataset.`);break}}for(let t=0;t0&&Number.isInteger(t),()=>`batchSize is required to be a positive integer, but got ${t}`)}function sliceArrays(t,r,a){return null==t?[null]:Array.isArray(t)?t.map(t=>sliceAlongFirstAxis(t,r,a-r)):sliceAlongFirstAxis(t,r,a-r)}function sliceArraysByIndices(t,r){return globals_tidy(()=>null==t?null:Array.isArray(t)?t.map(t=>sliceArraysByIndices(t,r)):tfjs_backend_gather(t,"int32"===r.dtype?r:aE(r,"int32")))}function makeBatches(t,r){let a=[],n=0,s=null;for(;n=t&&(s=t),a.push([n,s]),n=s;return a}function ensureTensorsRank2OrHigher(t){let r=[];t instanceof tensor_Tensor&&(t=[t]);for(let a=0;aa.push(t.id));else if(null!=r)for(let t in r){let n=r[t];a.push(n.id)}let n=[];if(t instanceof tensor_Tensor)-1===a.indexOf(t.id)&&n.push(t);else if(Array.isArray(t))t.forEach(t=>{-1===a.indexOf(t.id)&&n.push(t)});else if(null!=t)for(let r in t){let s=t[r];-1===a.indexOf(s.id)&&n.push(s)}n.forEach(t=>{t.isDisposed||t.dispose()})}function isDataTensor(t){return t instanceof tensor_Tensor}function isDataArray(t){return Array.isArray(t)}function isDataDict(t){return!isDataTensor(t)&&!isDataArray(t)}function standardizeInputData(t,r,a,n=!0,s=""){let i;if(null==r||0===r.length){if(null!=t){let r=!1;if(isDataArray(t)&&t.length>0)r=!0;else if(isDataDict(t)){for(let a in t)if(t.hasOwnProperty(a)){r=!0;break}}else r=!0;if(r)throw new errors_ValueError(`Error when checking model ${s} expected no data, but got ${t}`)}return[]}if(null==t)return r.map(t=>null);if(isDataDict(t))for(let a of(i=[],r)){if(null==t[a])throw new errors_ValueError(`No data provided for "${a}". 
Need data for each key in: ${r}`);i.push(t[a])}else if(isDataArray(t)){if(t.length!==r.length)throw new errors_ValueError(`Error when checking model ${s}: the Array of Tensors that you are passing to your model is not the size the model expected. Expected to see ${r.length} Tensor(s), but instead got the following list of Tensor(s): ${t}`);i=t}else{if(r.length>1)throw new errors_ValueError(`The model ${s} expects ${r.length} Tensor(s), but only received one Tensor. Found: Tensor with shape ${t.shape}`);i=[t]}if(i=ensureTensorsRank2OrHigher(i),null!=a)for(let t=0;t=0&&i!==l)throw new errors_ValueError(`${s} expected a batch of elements where each example has shape [${a[t].slice(1,a[t].length)}] (i.e.,tensor shape [*,${a[t].slice(1,a[t].length)}]) but the ${s} received an input with ${o.shape[0]} examples, each with shape [${o.shape.slice(1,o.shape.length)}] (tensor shape [${o.shape}])`)}}return i}function checkArrayLengths(t,r,a){let n=generic_utils_unique(t.map(t=>t.shape[0]));n.sort();let s=generic_utils_unique(r.map(t=>t.shape[0]));if(s.sort(),n.length>1)throw new errors_ValueError(`All input Tensors (x) should have the same number of samples. Got array shapes: ${JSON.stringify(t.map(t=>t.shape))}`);if(s.length>1)throw new errors_ValueError(`All target Tensors (y) should have the same number of samples. Got array shapes: ${JSON.stringify(r.map(t=>t.shape))}`);if(n.length>0&&s.length>0&&!arraysEqual(n,s))throw new errors_ValueError(`Input Tensors should have the same number of samples as target Tensors. Found ${n[0]} input sample(s) and ${s[0]} target sample(s).`)}function checkLossAndTargetCompatibility(t,r,a){let n=[losses_meanSquaredError,binaryCrossentropy,categoricalCrossentropy];for(let s=0;s1)throw new errors_ValueError(`The model expects ${r.length} ${s} Tensors, but only received one Tensor. 
Found: array with shape ${JSON.stringify(t.shape)}.`);i=[t]}if(null!=a)for(let t=0;t[]);if("string"==typeof t||"function"==typeof t)a=[t];else if(Array.isArray(t)||"object"==typeof t)a=t;else throw TypeError(`Type of metrics argument not understood. Expected an string,function, Array, or Object, found: ${t}`);if(Array.isArray(a))return r.map(t=>a);{let t=[];for(let n of r){let r=a.hasOwnProperty(n)?a[n]:[];Array.isArray(r)||(r=[r]),t.push(r)}return t}}let training_LayersModel=class training_LayersModel extends Container{constructor(t){super(t),this.isTraining=!1}summary(t,r,a=console.log){if(!this.built)throw new errors_ValueError("This model has never been called, thus its weights have not been created yet. So no summary can be displayed. Build the model first (e.g., by calling it on some test data).");printSummary(this,t,r,a)}compile(t){if(null==t.loss&&(t.loss=[]),this.loss=t.loss,"string"==typeof t.optimizer)this.optimizer_=getOptimizer(t.optimizer),this.isOptimizerOwned=!0;else{if(!(t.optimizer instanceof Optimizer))throw new errors_ValueError("User-defined optimizer must be an instance of tf.Optimizer.");this.optimizer_=t.optimizer,this.isOptimizerOwned=!1}let r=[];if(Array.isArray(t.loss)||"string"==typeof t.loss||"function"==typeof t.loss)if(Array.isArray(t.loss)){if(t.loss.length!==this.outputs.length)throw new errors_ValueError(`When passing an Array as loss, it should have one entry per model output. The model has ${this.outputs.length} output(s), but you passed loss=${t.loss}.`);r=t.loss.map(t=>get(t))}else{let a=get(t.loss);this.outputs.forEach(t=>{r.push(a)})}else{for(let r in t.loss=t.loss,t.loss)if(-1===this.outputNames.indexOf(r))throw new errors_ValueError(`Unknown entry in loss dictionary: "${r}". Only expected the following keys: ${this.outputNames}`);for(let a of this.outputNames)null==t.loss[a]&&console.warn(`Output "${a}" is missing from loss dictionary. 
We assume this was done on purpose, and we will not be expecting data to be passed to ${a} during training`),r.push(get(t.loss[a]))}this.lossFunctions=r,this.feedOutputNames=[],this.feedOutputShapes=[],this.feedLossFns=[];for(let t=0;t{for(let t=0;t1&&(this.metricsTensors.push([r,t]),this.metricsNames.push(this.outputNames[t]+"_loss"))}});let n=collectMetrics(t.metrics,this.outputNames),appendMetric=(t,r,a)=>{this.outputNames.length>1&&(r=this.outputNames[t]+"_"+r),this.metricsNames.push(r),this.metricsTensors.push([a,t])};nameScope("metric",()=>{for(let t=0;t{let a,n,s;for(let i of r){let r;if("string"==typeof i&&-1!==["accuracy","acc","crossentropy","ce"].indexOf(i)){let r,o=this.internalOutputShapes[t];1===o[o.length-1]||this.lossFunctions[t]===binaryCrossentropy?-1!==["accuracy","acc"].indexOf(i)?n=binaryAccuracy:-1!==["crossentropy","ce"].indexOf(i)&&(n=metrics_binaryCrossentropy):this.lossFunctions[t]===sparseCategoricalCrossentropy?-1!==["accuracy","acc"].indexOf(i)?n=sparseCategoricalAccuracy:-1!==["crossentropy","ce"].indexOf(i)&&(n=lt):-1!==["accuracy","acc"].indexOf(i)?n=categoricalAccuracy:-1!==["crossentropy","ce"].indexOf(i)&&(n=le),-1!==["accuracy","acc"].indexOf(i)?r="acc":-1!==["crossentropy","ce"].indexOf(i)&&(r="ce"),s=n,a=""+r}else s=metrics_get(i),a=""+getLossOrMetricName(i);nameScope(a,()=>{r=s}),appendMetric(t,a,r)}})(n[t])}}),this.collectedTrainableWeights=this.trainableWeights}checkTrainableWeightsConsistency(){null!=this.collectedTrainableWeights&&this.trainableWeights.length!==this.collectedTrainableWeights.length&&console.warn("Discrepancy between trainableweights and collected trainable weights. 
Did you set `model.trainable` without calling `model.compile()` afterwards?")}evaluate(t,r,a={}){let n=null==a.batchSize?32:a.batchSize;checkBatchSize(n);let s=this.standardizeUserDataXY(t,r,!0,n);try{let t=s[0].concat(s[1]);this.makeTestFunction();let r=this.testFunction,i=this.testLoop(r,t,n,a.verbose,a.steps);return singletonOrArray(i)}finally{disposeNewTensors(s[0],t),disposeNewTensors(s[1],r)}}async evaluateDataset(t,r){return this.makeTestFunction(),evaluateDataset(this,t,r)}checkNumSamples(t,r,a,n="steps"){let s;if(null!=a){if(s=null,null!=r)throw new errors_ValueError(`If ${n} is set, batchSize must be null or undefined.Got batchSize = ${r}`)}else if(null!=t)s=Array.isArray(t)?t[0].shape[0]:t.shape[0];else throw new errors_ValueError(`Either the input data should have a defined shape, or ${n} shoud be specified.`);return s}execute(t,r){if(Array.isArray(r)&&0===r.length)throw new errors_ValueError("`outputs` is an empty Array, which is not allowed.");let a=Array.isArray(r),n=a?r:[r],s=this.retrieveSymbolicTensors(n),i=new FeedDict;if(t instanceof tensor_Tensor&&(t=[t]),Array.isArray(t)){if(t.length!==this.inputs.length)throw new errors_ValueError(`The number of inputs provided (${t.length}) does not match the number of inputs of this model (${this.inputs.length}).`);for(let r=0;rt.name);for(let n=0;n0){let a=[];throw r.forEach((r,n)=>{null==r&&a.push(t[n])}),new errors_ValueError(`Cannot find SymbolicTensors for output name(s): ${JSON.stringify(a)}`)}return r}predictLoop(t,r=32,a=!1){return globals_tidy(()=>{let n=this.checkNumSamples(t);if(a)throw new errors_NotImplementedError("Verbose predictLoop() is not implemented yet.");let s=makeBatches(n,r),i=this.outputs.map(t=>[]);for(let r=0;r{let a=sliceArrays(t,s[r][0],s[r][1]),n=[];if(Array.isArray(a))for(let t=0;ti[r].push(t));return singletonOrArray(i.map(t=>a7(t,0)))})}predict(t,r={}){let a=ensureTensorsRank2OrHigher(t);checkInputData(a,this.inputNames,this.feedInputShapes,!1);try{let 
t=null==r.batchSize?32:r.batchSize;return checkBatchSize(t),this.predictLoop(a,t)}finally{disposeNewTensors(a,t)}}predictOnBatch(t){checkInputData(t,this.inputNames,this.feedInputShapes,!0);let r=(Array.isArray(t)?t[0]:t).shape[0];return this.predictLoop(t,r)}standardizeUserDataXY(t,r,a=!0,n){if(null==this.optimizer_)throw new RuntimeError("You must compile a model before training/testing. Use LayersModel.compile(modelCompileArgs).");let s=[];for(let t=0;t0&&t[0].shape[0]%n!=0)throw new errors_ValueError(`In a stateful network, you should only pass inputs with a number of samples that is divisible by the batch size ${n}. Found: ${t[0].shape[0]} sample(s).`);return[t,r]}async standardizeUserData(t,r,a,n,s=!0,i){let[o,l]=this.standardizeUserDataXY(t,r,s,i);if(null!=a)throw Error("sample weight is not supported yet.");let u=null;if(null!=n){let t=standardizeClassWeights(n,this.outputNames);u=[];for(let r=0;r{let i=this.checkNumSamples(r,a,s,"steps"),o=[];if(n>0)throw new errors_NotImplementedError("Verbose mode is not implemented yet.");if(null!=s)throw new errors_NotImplementedError("steps mode in testLoop() is not implemented yet");{let n=makeBatches(i,a),s=tensor1d(math_utils_range(0,i));for(let a=0;a1){let r=count(t.slice(0,a),n);s+=`_${r}`}r.push(s)}return r}makeTrainFunction(){return t=>{let r=[],a=t.slice(0,this.inputs.length),n=t.slice(this.inputs.length,this.inputs.length+this.outputs.length),s=t.slice(this.inputs.length+this.outputs.length,this.inputs.length+2*this.outputs.length),i=[],totalLossFunction=()=>{let t,o=[];for(let t=0;t1&&t{t=a$(t,r)}),t},o=this.collectedTrainableWeights.map(t=>t.read());return[this.optimizer_.minimize(totalLossFunction,!0,o)].concat(i)}}makeTestFunction(){this.testFunction=t=>globals_tidy(()=>{let r,a=[],n=t.slice(0,this.inputs.length),s=t.slice(this.inputs.length,this.inputs.length+this.outputs.length),i=[];for(let t=0;t0){if(A=!0,2===a.validationData.length)l=a.validationData[0],u=a.validationData[1];else 
if(3===a.validationData.length)throw new errors_NotImplementedError("validationData including sample weights is not supported yet.");else throw new errors_ValueError(`When passing validation data, it must contain 2 (valX, valY) or 3 (valX, valY, valSampleWeight) items; ${a.validationData} is invalid.`);let t=await this.standardizeUserData(l,u,null,null,!0,C);p=t[0],m=t[1],_=p.concat(m)}else if(null!=a.validationSplit&&a.validationSplit>0&&a.validationSplit<1){A=!0;let t=Math.floor(n[0].shape[0]*(1-a.validationSplit)),r=n[0].shape[0];p=sliceArrays(n,t,r),i=n,n=sliceArrays(n,0,t),m=sliceArrays(s,t,r),o=s,s=sliceArrays(s,0,t),_=p.concat(m)}else null!=a.validationSteps&&(A=!0);let $=n.concat(s).concat(y);this.checkTrainableWeightsConsistency();let F=this.makeTrainFunction(),D=this.getDedupedMetricsNames();A?(this.makeTestFunction(),w=this.testFunction,I=D.slice().concat(D.map(t=>"val_"+t))):(w=null,_=[],I=D.slice());let P=standardizeCallbacks(a.callbacks,a.yieldEvery);return await this.fitLoop(F,$,D,C,a.epochs,a.verbose,P,w,_,a.shuffle,I,a.initialEpoch,null,null)}finally{this.isTraining=!1,disposeNewTensors(n,t),disposeNewTensors(s,r),disposeNewTensors(i,t),disposeNewTensors(o,r),disposeNewTensors(p,l),disposeNewTensors(m,u),null!=y&&globals_dispose(y)}}async fitLoop(t,r,a,n,s,i,o,l,u,p,m,y,_,w){let I;null==n&&(n=32),null==s&&(s=1),null==p&&(p=!0),null==y&&(y=0);let C=!1;if(null!=l&&null!=u&&(C=!0),null!=w&&(C=!0,null==_))throw new errors_ValueError("Can only use `validationSteps` when doing step-wise training, i.e., `stepsPerEpoch` must be set.");let E=this.checkNumSamples(r,n,_,"steps_per_epoch");null!=E&&(I=math_utils_range(0,E)),null==i&&(i=1);let{callbackList:A,history:$}=configureCallbacks(o,i,s,y,E,_,n,C,m);A.setModel(this),this.history=$,await A.onTrainBegin(),this.stopTraining_=!1;for(let i=y;i{let y=o[p][0],_=o[p][1],w=sliceAlongFirstAxis(i,y,_-y);m.batch=p,m.size=_-y;let I=t(sliceArraysByIndices(r,w));for(let t=0;ttoSnakeCase(t))}else{let 
r=Object.keys(this.loss);t={};let a=this.loss;for(let n of r)if("string"==typeof a[n])t[n]=toSnakeCase(a[n]);else throw Error("Serialization of non-string loss is not supported.")}return t}getMetricIdentifiers(){if("string"==typeof this.metrics||"function"==typeof this.metrics)return[toSnakeCase(getLossOrMetricName(this.metrics))];{if(Array.isArray(this.metrics))return this.metrics.map(t=>toSnakeCase(getLossOrMetricName(t)));let t={};for(let r in this.metrics)t[r]=toSnakeCase(getLossOrMetricName(this.metrics[r]));return t}}getTrainingConfig(){return{loss:this.getLossIdentifiers(),metrics:this.getMetricIdentifiers(),optimizer_config:{class_name:this.optimizer.getClassName(),config:this.optimizer.getConfig()}}}loadTrainingConfig(t){let r,a;if(null!=t.weighted_metrics)throw Error("Loading weight_metrics is not supported yet.");if(null!=t.loss_weights)throw Error("Loading loss_weights is not supported yet.");if(null!=t.sample_weight_mode)throw Error("Loading sample_weight_mode is not supported yet.");let n=serialization_deserialize(serialization_utils_convertPythonicToTs(t.optimizer_config));if("string"==typeof t.loss)r=toCamelCase(t.loss);else if(Array.isArray(t.loss))r=t.loss.map(t=>toCamelCase(t));else if(null!=t.loss)for(let a in r={},t.loss)r[a]=toCamelCase(t.loss[a]);if(Array.isArray(t.metrics))a=t.metrics.map(t=>toCamelCase(t));else if(null!=t.metrics)for(let r in a={},t.metrics)a[r]=toCamelCase(t.metrics[r]);this.compile({loss:r,metrics:a,optimizer:n})}async save(t,r){if("string"==typeof t){let r=getSaveHandlers(t);if(0===r.length)throw new errors_ValueError(`Cannot find any save handlers for URL '${t}'`);if(r.length>1)throw new errors_ValueError(`Found more than one (${r.length}) save handlers for URL '${t}'`);t=r[0]}if(null==t.save)throw new errors_ValueError("LayersModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");let a=await 
encodeWeights(this.getNamedWeights(r)),n={modelTopology:this.toJSON(null,!1),format:"layers-model",generatedBy:`TensorFlow.js tfjs-layers v${la}`,convertedBy:null};if(null!=r&&r.includeOptimizer&&null!=this.optimizer){n.trainingConfig=this.getTrainingConfig();let{data:t,specs:r}=await encodeWeights(await this.optimizer.getWeights(),"optimizer");a.specs.push(...r),a.data=concatenateArrayBuffers([a.data,t])}return null!=this.userDefinedMetadata&&(checkUserDefinedMetadata(this.userDefinedMetadata,this.name,!0),n.userDefinedMetadata=this.userDefinedMetadata),n.weightData=a.data,n.weightSpecs=a.specs,t.save(n)}setUserDefinedMetadata(t){checkUserDefinedMetadata(t,this.name),this.userDefinedMetadata=t}getUserDefinedMetadata(){return this.userDefinedMetadata}};training_LayersModel.className="Model",registerClass(training_LayersModel);let Functional=class Functional extends training_LayersModel{};Functional.className="Functional",registerClass(Functional);let models_Sequential=class models_Sequential extends training_LayersModel{constructor(t){if(super({inputs:[],outputs:[]}),t=t||{},this.trainable=!0,this.built=!1,this.name=null!=t.name?t.name:getUid("sequential_"),null!=t.layers)for(const r of t.layers)this.add(r)}checkShape(t){if(t.inboundNodes[0].outputTensors[0].shape.some(t=>t<0))throw new errors_ValueError(`Negative dimension size caused by adding layer ${t.name} with input shape [${t.inboundNodes[0].inputTensors[0].shape}]`)}add(t){let r,a=t instanceof models_Sequential||t instanceof training_LayersModel;if(a){if(1!==(r=t).outputs.length)throw new errors_ValueError("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");if(1!==r.inputs.length)throw new errors_ValueError("All layers in a Sequential model should have a single input tensor. 
For multi-input layers, use the functional API.")}if(0===this.outputs.length){if(0===t.inboundNodes.length){if(null==t.batchInputShape)throw new errors_ValueError("The first layer in a Sequential model must get an `inputShape` or `batchInputShape` argument.");let r=input_layer_Input({batchShape:t.batchInputShape,dtype:t.dtype,name:t.name+"_input"});t.apply(r)}if(a)this.outputs=r.outputs,this.inputs=r.inputs;else{if(1!==t.inboundNodes.length)throw new errors_ValueError(`A layer added to a Sequential model must not already be connected somewhere else. LayersModel received layer ${t.name} which has ${t.inboundNodes.length} pre-existing inbound connections.`);if(1!==t.inboundNodes[0].outputTensors.length)throw new errors_ValueError("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");this.checkShape(t),this.outputs=[t.inboundNodes[0].outputTensors[0]],this.inputs=getSourceInputs(this.outputs[0])}this.inboundNodes=[],new Node({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:pyListRepeat(null,this.inputs.length),outputMasks:[null],inputShapes:this.inputs.map(t=>t.shape),outputShapes:this.outputs[0].shape})}else{let r=t.apply(this.outputs[0]);if(Array.isArray(r))throw TypeError("All layers in a Sequential model should have a single output tensor. 
For multi-output layers, use the functional API.");this.checkShape(t),this.outputs=[r],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}this.layers.push(t),this.built=!1}pop(){if(0===this.layers.length)throw TypeError("There are no layers in the model.");if(this.layers.pop(),0===this.layers.length)this.outputs=[],this.inboundNodes=[],this.outboundNodes=[];else{let t=this.layers.length-1;this.layers[t].outboundNodes=[],this.outputs=[this.layers[t].output],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}}call(t,r){return null==this.model&&this.build(),this.model.call(t,r)}build(t){if(getExactlyOneShape(t),0===this.inputs.length||0===this.outputs.length)throw TypeError("Sequential model cannot be built: model is empty. Add some layers first.");this.model=new training_LayersModel({inputs:this.inputs,outputs:this.outputs[0],name:this.name+"_model"}),this.model.trainable=this.trainable,this.supportsMasking=this.model.supportsMasking,this.inputLayers=this.model.inputLayers,this.inputLayersNodeIndices=this.model.inputLayersNodeIndices,this.inputLayersTensorIndices=this.model.inputLayersTensorIndices,this.outputLayers=this.model.outputLayers,this.outputLayersNodeIndices=this.model.outputLayersNodeIndices,this.outputLayersTensorIndices=this.model.outputLayersTensorIndices,this.nodesByDepth=this.model.nodesByDepth,this.containerNodes=this.model.containerNodes,this.outputNames=this.model.outputNames,this.inputNames=this.model.inputNames,this.built=!0}countParams(){return this.built||this.build(),super.countParams()}summary(t,r,a=console.log){this.built||this.build(),super.summary(t,r,a)}setWeights(t){null==this.model&&this.build(),this.model.setWeights(t)}evaluate(t,r,a={}){if(!this.built)throw new RuntimeError("The model needs to be compiled before being used.");return this.model.evaluate(t,r,a)}async evaluateDataset(t,r){if(!this.built)throw new 
RuntimeError("The model needs to be compiled before being used.");return this.model.evaluateDataset(t,r)}predict(t,r={}){return null==this.model&&this.build(),this.model.predict(t,r)}predictOnBatch(t){return null==this.model&&this.build(),this.model.predictOnBatch(t)}compile(t){this.build(),this.model.compile(t),this.optimizer_=this.model.optimizer,this.isOptimizerOwned=this.model.isOptimizerOwned,this.loss=this.model.loss,this.metrics=this.model.metrics,this.metricsTensors=this.model.metricsTensors,this.metricsNames=this.model.metricsNames}get optimizer(){return null==this.model?void 0:this.model.optimizer}set optimizer(t){this.model.optimizer=t}async fit(t,r,a={}){if(!this.built)throw new RuntimeError("The model needs to be compiled before being used.");return this.model.fit(t,r,a)}async fitDataset(t,r){if(!this.built)throw new RuntimeError("The model needs to be compiled before being used.");return this.model.fitDataset(t,r)}async trainOnBatch(t,r){return this.model.trainOnBatch(t,r)}static fromConfig(t,r,a={},n=!1){let s,i={};if(r instanceof Array){if(null==r[0].className||"Merge"===r[0].className)throw new errors_ValueError("Legacy serialization format not supported yet.");s=r}else assert(null!=r.layers,()=>"When the config data for a Sequential model is not an Array, it must be an Object that contains the 'layers' field."),s=r.layers,delete r.layers,i=r;let o=new t(i);if(!(o instanceof models_Sequential))throw new errors_NotImplementedError(`Sequential.fromConfig called on non-Sequential input: ${o}`);for(let t of s){let r=serialization_deserialize(t,void 0,n);n&&r.setFastWeightInitDuringBuild(!0),o.add(r)}return o}set stopTraining(t){if(null==this.model)throw new errors_ValueError("Cannot set the stopTraining property of a sequential model before it is compiled.");this.model.stopTraining=t}get stopTraining(){if(null==this.model)throw new errors_ValueError("Cannot get the stopTraining property of a sequential model before it is compiled.");return 
this.model.stopTraining}getConfig(){let t=[];for(let r of this.layers){let a={};a.className=r.getClassName(),a.config=r.getConfig(),t.push(a)}return{name:this.name,layers:t}}};models_Sequential.className="Sequential",registerClass(models_Sequential);let activations_Activation=class activations_Activation extends Serializable{getConfig(){return{}}};let activations_Elu=class activations_Elu extends activations_Activation{apply(t,r=1){return tfjs_backend_elu(t,r)}};activations_Elu.className="elu",registerClass(activations_Elu);let activations_Selu=class activations_Selu extends activations_Activation{apply(t){return s8(t)}};activations_Selu.className="selu",registerClass(activations_Selu);let activations_Relu=class activations_Relu extends activations_Activation{apply(t){return sJ(t)}};activations_Relu.className="relu",registerClass(activations_Relu);let activations_Relu6=class activations_Relu6 extends activations_Activation{apply(t){return globals_tidy(()=>s_(6,sJ(t)))}};activations_Relu6.className="relu6",registerClass(activations_Relu6);let Linear=class Linear extends activations_Activation{apply(t){return t}};Linear.className="linear",registerClass(Linear);let activations_Sigmoid=class activations_Sigmoid extends activations_Activation{apply(t){return ne(t)}};activations_Sigmoid.className="sigmoid",registerClass(activations_Sigmoid);let HardSigmoid=class HardSigmoid extends activations_Activation{apply(t){return hardSigmoid(t)}};HardSigmoid.className="hardSigmoid",registerClass(HardSigmoid);let activations_Softplus=class activations_Softplus extends activations_Activation{apply(t){return so(t)}};activations_Softplus.className="softplus",registerClass(activations_Softplus);let Softsign=class Softsign extends activations_Activation{apply(t){return softsign(t)}};Softsign.className="softsign",registerClass(Softsign);let activations_Tanh=class activations_Tanh extends activations_Activation{apply(t){return 
nr(t)}};activations_Tanh.className="tanh",registerClass(activations_Tanh);let activations_Softmax=class activations_Softmax extends activations_Activation{apply(t,r=-1){return io(t,r)}};activations_Softmax.className="softmax",registerClass(activations_Softmax);let activations_LogSoftmax=class activations_LogSoftmax extends activations_Activation{apply(t,r=-1){return su(t,r)}};activations_LogSoftmax.className="logSoftmax",registerClass(activations_LogSoftmax);let Gelu=class Gelu extends activations_Activation{apply(t){return globals_tidy(()=>globals_tidy(()=>{let r=aD(.5,a$(1,nG(aF(t,Math.sqrt(2)))));return aD(t,r)}))}};Gelu.className="gelu",registerClass(Gelu);let GeluNew=class GeluNew extends activations_Activation{apply(t){return globals_tidy(()=>aD(.5,aD(t,a$(1,nr(aD(aP(aF(2,Math.PI)),a$(t,aD(.044715,aV(t,3)))))))))}};GeluNew.className="gelu_new",registerClass(GeluNew);let Mish=class Mish extends activations_Activation{apply(t){return globals_tidy(()=>aD(t,nr(so(t))))}};Mish.className="mish",registerClass(Mish);let Swish=class Swish extends activations_Activation{apply(t,r=1){return globals_tidy(()=>aD(ne(aD(t,r)),t))}};function serializeActivation(t){return t.getClassName()}function deserializeActivation(t,r={}){return deserializeKerasObject(t,SerializationMap.getMap().classNameMap,r,"activation")}function getActivation(t){if(null==t){let t={};return t.className="linear",t.config={},deserializeActivation(t)}if("string"==typeof t){let r={};return r.className=t,r.config={},deserializeActivation(r)}return t instanceof activations_Activation?t:deserializeActivation(t)}function assertObjectArgs(t){if(null!=t&&"object"!=typeof t)throw Error(`Argument to L1L2 regularizer's constructor is expected to be an object, but received: ${t}`)}Swish.className="swish",registerClass(Swish);let Regularizer=class Regularizer extends Serializable{};let regularizers_L1L2=class regularizers_L1L2 extends 
Regularizer{constructor(t){super(),assertObjectArgs(t),this.l1=null==t||null==t.l1?.01:t.l1,this.l2=null==t||null==t.l2?.01:t.l2,this.hasL1=0!==this.l1,this.hasL2=0!==this.l2}apply(t){return globals_tidy(()=>{let r=zeros([1]);return this.hasL1&&(r=a$(r,nH(aD(this.l1,aW(t))))),this.hasL2&&(r=a$(r,nH(aD(this.l2,aD(t,t))))),a6(r,[])})}getConfig(){return{l1:this.l1,l2:this.l2}}static fromConfig(t,r){return new t({l1:r.l1,l2:r.l2})}};regularizers_L1L2.className="L1L2",registerClass(regularizers_L1L2);let ln={l1l2:"L1L2"};function deserializeRegularizer(t,r={}){return deserializeKerasObject(t,SerializationMap.getMap().classNameMap,r,"regularizer")}function getRegularizer(t){return null==t?null:"string"==typeof t?deserializeRegularizer({className:t in ln?ln[t]:t,config:{}}):t instanceof Regularizer?t:deserializeRegularizer(t)}let advanced_activations_ReLU=class advanced_activations_ReLU extends Layer{constructor(t){super(null==t?{}:t),this.supportsMasking=!0,null!=t&&(this.maxValue=t.maxValue)}call(t,r){let a=sJ(t=getExactlyOneTensor(t));return null!=this.maxValue&&(a=nf(a,0,this.maxValue)),a}computeOutputShape(t){return t}getConfig(){let t={maxValue:this.maxValue};return Object.assign(t,super.getConfig()),t}};advanced_activations_ReLU.className="ReLU",registerClass(advanced_activations_ReLU);let advanced_activations_LeakyReLU=class advanced_activations_LeakyReLU extends Layer{constructor(t){super(null==t?{}:t),this.DEFAULT_ALPHA=.3,null==t&&(t={}),this.alpha=null==t.alpha?this.DEFAULT_ALPHA:t.alpha}call(t,r){return n9(getExactlyOneTensor(t),this.alpha)}computeOutputShape(t){return t}getConfig(){let t={alpha:this.alpha};return Object.assign(t,super.getConfig()),t}};advanced_activations_LeakyReLU.className="LeakyReLU",registerClass(advanced_activations_LeakyReLU);let advanced_activations_PReLU=class advanced_activations_PReLU extends 
Layer{constructor(t){if(super(null==t?{}:t),this.DEFAULT_ALPHA_INITIALIZER="zeros",null==t&&(t={}),this.supportsMasking=!0,this.alphaInitializer=getInitializer(t.alphaInitializer||this.DEFAULT_ALPHA_INITIALIZER),this.alphaRegularizer=getRegularizer(t.alphaRegularizer),this.alphaConstraint=getConstraint(t.alphaConstraint),null==t.sharedAxes)this.sharedAxes=null;else if(Array.isArray(t.sharedAxes))this.sharedAxes=t.sharedAxes;else if("number"==typeof t.sharedAxes)this.sharedAxes=[t.sharedAxes];else throw new errors_ValueError(`Expected sharedAxes to be a number or an array of numbers, but got ${t.sharedAxes}`)}build(t){let r=(t=getExactlyOneShape(t)).slice(1);if(null!=this.sharedAxes)for(let t of this.sharedAxes)r[t-1]=1;this.alpha=this.addWeight("alpha",r,"float32",this.alphaInitializer,this.alphaRegularizer,!0,this.alphaConstraint);let a={};if(null!=this.sharedAxes)for(let r=1;r{let a=getExactlyOneTensor(t),n=r.mask;if(null!=n){let t=aD(aB(ones_ones(a.shape),aE(n,a.dtype)),scalar_scalar(-1e9));a=a$(a,t)}if(this.axis instanceof Array)if(this.axis.length>1)return nY(aB(a,sp(a,this.axis,!0)));else return this.softmax(a,this.axis[0]);return this.softmax(a,this.axis)})}computeOutputShape(t){return t}getConfig(){let t={axis:this.axis};return Object.assign(t,super.getConfig()),t}};function normalizeArray(t,r,a){if("number"==typeof t)return pyListRepeat(t,r);if(t.length!==r)throw new errors_ValueError(`The ${a} argument must be an integer or tuple of ${r} integers. 
Received: ${t.length} elements.`);for(let n=0;n(common_checkDataFormat(r),"channelsFirst"===r)?iI(t,[0,2,3,1]):t)}function preprocessConv3DInput(t,r){return globals_tidy(()=>(common_checkDataFormat(r),"channelsFirst"===r)?iI(t,[0,2,3,4,1]):t)}function conv1dWithBias(t,r,a,n=1,s="valid",i,o=1){return globals_tidy(()=>{if(null==i&&(i=imageDataFormat()),common_checkDataFormat(i),3!==t.shape.length)throw new errors_ValueError(`The input of a conv1dWithBias operation should be 3, but is ${t.shape.length} instead.`);if(3!==r.shape.length)throw new errors_ValueError(`The kernel for a conv1dWithBias operation should be 3, but is ${r.shape.length} instead`);if(null!=a&&1!==a.shape.length)throw new errors_ValueError(`The bias for a conv1dWithBias operation should be 1, but is ${a.shape.length} instead`);if("channelsFirst"===i&&(t=iI(t,[0,2,1])),"causal"===s)throw new errors_NotImplementedError("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");let l=nT(t,r,n,"same"===s?"same":"valid","NWC",o);return null!=a&&(l=biasAdd(l,a)),l})}function conv2dWithBiasActivation(t,r,a,n=[1,1],s="valid",i,o,l=null){return globals_tidy(()=>{if(null==i&&(i=imageDataFormat()),common_checkDataFormat(i),3!==t.rank&&4!==t.rank)throw new errors_ValueError(`conv2dWithBiasActivation expects input to be of rank 3 or 4, but received ${t.rank}.`);if(3!==r.rank&&4!==r.rank)throw new errors_ValueError(`conv2dWithBiasActivation expects kernel to be of rank 3 or 4, but received ${t.rank}.`);let u=preprocessConv2DInput(t,i);if("causal"===s)throw new errors_NotImplementedError("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");return u=iF({x:u,filter:r,strides:n,pad:"same"===s?"same":"valid",dilations:o,dataFormat:"NHWC",bias:a,activation:l}),"channelsFirst"===i&&(u=iI(u,[0,3,1,2])),u})}function conv3dWithBias(t,r,a,n=[1,1,1],s="valid",i,o){return 
globals_tidy(()=>{if(null==i&&(i=imageDataFormat()),common_checkDataFormat(i),4!==t.rank&&5!==t.rank)throw new errors_ValueError(`conv3dWithBias expects input to be of rank 4 or 5, but received ${t.rank}.`);if(4!==r.rank&&5!==r.rank)throw new errors_ValueError(`conv3dWithBias expects kernel to be of rank 4 or 5, but received ${t.rank}.`);let l=preprocessConv3DInput(t,i);if("causal"===s)throw new errors_NotImplementedError("The support for CAUSAL padding mode in conv3dWithBias is not implemented yet.");return l=nw(l,r,n,"same"===s?"same":"valid","NDHWC",o),null!=a&&(l=biasAdd(l,a)),"channelsFirst"===i&&(l=iI(l,[0,4,1,2,3])),l})}advanced_activations_Softmax.className="Softmax",registerClass(advanced_activations_Softmax);let BaseConv=class BaseConv extends Layer{constructor(t,r){if(super(r),this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",BaseConv.verifyArgs(r),this.rank=t,assertPositiveInteger(this.rank,"rank"),1!==this.rank&&2!==this.rank&&3!==this.rank)throw new errors_NotImplementedError(`Convolution layer for rank other than 1, 2, or 3 (${this.rank}) is not implemented yet.`);if(this.kernelSize=normalizeArray(r.kernelSize,t,"kernelSize"),this.strides=normalizeArray(null==r.strides?1:r.strides,t,"strides"),this.padding=null==r.padding?"valid":r.padding,checkPaddingMode(this.padding),this.dataFormat=null==r.dataFormat?"channelsLast":r.dataFormat,common_checkDataFormat(this.dataFormat),this.activation=getActivation(r.activation),this.useBias=null==r.useBias||r.useBias,this.biasInitializer=getInitializer(r.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.biasConstraint=getConstraint(r.biasConstraint),this.biasRegularizer=getRegularizer(r.biasRegularizer),this.activityRegularizer=getRegularizer(r.activityRegularizer),this.dilationRate=normalizeArray(null==r.dilationRate?1:r.dilationRate,t,"dilationRate"),1===this.rank&&Array.isArray(this.dilationRate)&&1!==this.dilationRate.length)throw new 
errors_ValueError(`dilationRate must be a number or an array of a single number for 1D convolution, but received ${JSON.stringify(this.dilationRate)}`);if(2===this.rank){if("number"==typeof this.dilationRate)this.dilationRate=[this.dilationRate,this.dilationRate];else if(2!==this.dilationRate.length)throw new errors_ValueError(`dilationRate must be a number or array of two numbers for 2D convolution, but received ${JSON.stringify(this.dilationRate)}`)}else if(3===this.rank){if("number"==typeof this.dilationRate)this.dilationRate=[this.dilationRate,this.dilationRate,this.dilationRate];else if(3!==this.dilationRate.length)throw new errors_ValueError(`dilationRate must be a number or array of three numbers for 3D convolution, but received ${JSON.stringify(this.dilationRate)}`)}}static verifyArgs(t){if(generic_utils_assert("kernelSize"in t,"required key 'kernelSize' not in config"),"number"!=typeof t.kernelSize&&!checkArrayTypeAndLength(t.kernelSize,"number",1,3))throw new errors_ValueError(`BaseConv expects config.kernelSize to be number or number[] with length 1, 2, or 3, but received ${JSON.stringify(t.kernelSize)}.`)}getConfig(){let t={kernelSize:this.kernelSize,strides:this.strides,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,activation:serializeActivation(this.activation),useBias:this.useBias,biasInitializer:serializeKerasObject(this.biasInitializer),biasRegularizer:serializeKerasObject(this.biasRegularizer),activityRegularizer:serializeKerasObject(this.activityRegularizer),biasConstraint:serializeKerasObject(this.biasConstraint)};return Object.assign(t,super.getConfig()),t}};let Conv=class Conv extends 
BaseConv{constructor(t,r){super(t,r),this.kernel=null,Conv.verifyArgs(r),this.filters=r.filters,assertPositiveInteger(this.filters,"filters"),this.kernelInitializer=getInitializer(r.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.kernelConstraint=getConstraint(r.kernelConstraint),this.kernelRegularizer=getRegularizer(r.kernelRegularizer)}build(t){t=getExactlyOneShape(t);let r="channelsFirst"===this.dataFormat?1:t.length-1;if(null==t[r])throw new errors_ValueError(`The channel dimension of the input should be defined. Found ${t[r]}`);let a=t[r],n=this.kernelSize.concat([a,this.filters]);this.kernel=this.addWeight("kernel",n,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[{ndim:this.rank+2,axes:{[r]:a}}],this.built=!0}call(t,r){return globals_tidy(()=>{let r;t=getExactlyOneTensor(t);let a=null==this.bias?null:this.bias.read(),n=mapActivationToFusedKernel(this.activation.getClassName());if(null!=n&&2===this.rank)r=conv2dWithBiasActivation(t,this.kernel.read(),a,this.strides,this.padding,this.dataFormat,this.dilationRate,n);else{if(1===this.rank)r=conv1dWithBias(t,this.kernel.read(),a,this.strides[0],this.padding,this.dataFormat,this.dilationRate[0]);else if(2===this.rank)r=conv2dWithBiasActivation(t,this.kernel.read(),a,this.strides,this.padding,this.dataFormat,this.dilationRate);else if(3===this.rank)r=conv3dWithBias(t,this.kernel.read(),a,this.strides,this.padding,this.dataFormat,this.dilationRate);else throw new errors_NotImplementedError("convolutions greater than 3D are not implemented yet.");null!=this.activation&&(r=this.activation.apply(r))}return r})}computeOutputShape(t){t=getExactlyOneShape(t);let r=[],a="channelsLast"===this.dataFormat?t.slice(1,t.length-1):t.slice(2);for(let t=0;t 0 but got ${JSON.stringify(t.filters)}`)}};let convolutional_Conv2D=class 
convolutional_Conv2D extends Conv{constructor(t){super(2,t),convolutional_Conv2D.verifyArgs(t)}getConfig(){let t=super.getConfig();return delete t.rank,t}static verifyArgs(t){if("number"!=typeof t.kernelSize&&!checkArrayTypeAndLength(t.kernelSize,"number",1,2))throw new errors_ValueError(`Conv2D expects config.kernelSize to be number or number[] with length 1 or 2, but received ${JSON.stringify(t.kernelSize)}.`)}};convolutional_Conv2D.className="Conv2D",registerClass(convolutional_Conv2D);let convolutional_Conv3D=class convolutional_Conv3D extends Conv{constructor(t){super(3,t),convolutional_Conv3D.verifyArgs(t)}getConfig(){let t=super.getConfig();return delete t.rank,t}static verifyArgs(t){if("number"!=typeof t.kernelSize&&!(Array.isArray(t.kernelSize)&&(1===t.kernelSize.length||3===t.kernelSize.length)))throw new errors_ValueError(`Conv3D expects config.kernelSize to be number or [number, number, number], but received ${JSON.stringify(t.kernelSize)}.`)}};convolutional_Conv3D.className="Conv3D",registerClass(convolutional_Conv3D);let convolutional_Conv2DTranspose=class convolutional_Conv2DTranspose extends convolutional_Conv2D{constructor(t){if(super(t),this.inputSpec=[new InputSpec({ndim:4})],"same"!==this.padding&&"valid"!==this.padding)throw new errors_ValueError(`Conv2DTranspose currently supports only padding modes 'same' and 'valid', but received padding mode ${this.padding}`)}build(t){if(4!==(t=getExactlyOneShape(t)).length)throw new errors_ValueError("Input should have rank 4; Received input shape: "+JSON.stringify(t));let r="channelsFirst"===this.dataFormat?1:t.length-1;if(null==t[r])throw new errors_ValueError("The channel dimension of the inputs should be defined. 
Found `None`.");let a=t[r],n=this.kernelSize.concat([this.filters,a]);this.kernel=this.addWeight("kernel",n,"float32",this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],"float32",this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[new InputSpec({ndim:4,axes:{[r]:a}})],this.built=!0}call(t,r){return globals_tidy(()=>{let r,a,n=getExactlyOneTensor(t);if(4!==n.shape.length)throw new errors_ValueError(`Conv2DTranspose.call() expects input tensor to be rank-4, but received a tensor of rank-${n.shape.length}`);let s=n.shape,i=s[0];"channelsFirst"===this.dataFormat?(r=2,a=3):(r=1,a=2);let o=s[r],l=s[a],u=this.kernelSize[0],p=this.kernelSize[1],m=this.strides[0],y=this.strides[1],_=[i,deconvLength(o,m,u,this.padding),deconvLength(l,y,p,this.padding),this.filters];"channelsLast"!==this.dataFormat&&(n=iI(n,[0,2,3,1]));let w=nS(n,this.kernel.read(),_,this.strides,this.padding);return"channelsLast"!==this.dataFormat&&(w=iI(w,[0,3,1,2])),null!=this.bias&&(w=biasAdd(w,this.bias.read(),this.dataFormat)),null!=this.activation&&(w=this.activation.apply(w)),w})}computeOutputShape(t){let r,a,n,s=(t=getExactlyOneShape(t)).slice();"channelsFirst"===this.dataFormat?(r=1,a=2,n=3):(r=3,a=1,n=2);let i=this.kernelSize[0],o=this.kernelSize[1],l=this.strides[0],u=this.strides[1];return s[r]=this.filters,s[a]=deconvLength(s[a],l,i,this.padding),s[n]=deconvLength(s[n],u,o,this.padding),s}getConfig(){let t=super.getConfig();return delete t.dilationRate,t}};convolutional_Conv2DTranspose.className="Conv2DTranspose",registerClass(convolutional_Conv2DTranspose);let convolutional_Conv3DTranspose=class convolutional_Conv3DTranspose extends convolutional_Conv3D{constructor(t){if(super(t),this.inputSpec=[new InputSpec({ndim:5})],"same"!==this.padding&&"valid"!==this.padding)throw new errors_ValueError(`Conv3DTranspose currently supports only padding modes 'same' and 'valid', but received 
padding mode ${this.padding}`)}build(t){if(5!==(t=getExactlyOneShape(t)).length)throw new errors_ValueError("Input should have rank 5; Received input shape: "+JSON.stringify(t));let r="channelsFirst"===this.dataFormat?1:t.length-1;if(null==t[r])throw new errors_ValueError("The channel dimension of the inputs should be defined. Found `None`.");let a=t[r],n=this.kernelSize.concat([this.filters,a]);this.kernel=this.addWeight("kernel",n,"float32",this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],"float32",this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[new InputSpec({ndim:5,axes:{[r]:a}})],this.built=!0}call(t,r){return globals_tidy(()=>{let r,a,n,s=getExactlyOneTensor(t);if(5!==s.shape.length)throw new errors_ValueError(`Conv3DTranspose.call() expects input tensor to be rank-4, but received a tensor of rank-${s.shape.length}`);let i=s.shape,o=i[0];"channelsFirst"===this.dataFormat?(n=2,r=3,a=4):(n=1,r=2,a=3);let l=i[n],u=i[r],p=i[a],m=this.kernelSize[0],y=this.kernelSize[1],_=this.kernelSize[2],w=this.strides[0],I=this.strides[1],C=this.strides[2],E=[o,deconvLength(l,w,m,this.padding),deconvLength(u,I,y,this.padding),deconvLength(p,C,_,this.padding),this.filters];"channelsLast"!==this.dataFormat&&(s=iI(s,[0,2,3,4,1]));let A=nN(s,this.kernel.read(),E,this.strides,this.padding);return"channelsLast"!==this.dataFormat&&(A=iI(A,[0,4,1,2,3])),null!==this.bias&&(A=biasAdd(A,this.bias.read(),this.dataFormat)),null!==this.activation&&(A=this.activation.apply(A)),A})}computeOutputShape(t){let r,a,n,s,i=(t=getExactlyOneShape(t)).slice();"channelsFirst"===this.dataFormat?(r=1,a=2,n=3,s=4):(r=4,a=1,n=2,s=3);let o=this.kernelSize[0],l=this.kernelSize[1],u=this.kernelSize[2],p=this.strides[0],m=this.strides[1],y=this.strides[2];return 
i[r]=this.filters,i[a]=deconvLength(i[a],p,o,this.padding),i[n]=deconvLength(i[n],m,l,this.padding),i[s]=deconvLength(i[s],y,u,this.padding),i}getConfig(){let t=super.getConfig();return delete t.dilationRate,t}};convolutional_Conv3DTranspose.className="Conv3DTranspose",registerClass(convolutional_Conv3DTranspose);let SeparableConv=class SeparableConv extends Conv{constructor(t,r){if(super(t,r),this.DEFAULT_DEPTHWISE_INITIALIZER="glorotUniform",this.DEFAULT_POINTWISE_INITIALIZER="glorotUniform",this.depthwiseKernel=null,this.pointwiseKernel=null,null==r.filters)throw new errors_ValueError("The `filters` configuration field is required by SeparableConv, but is unspecified.");if(null!=r.kernelInitializer||null!=r.kernelRegularizer||null!=r.kernelConstraint)throw new errors_ValueError("Fields kernelInitializer, kernelRegularizer and kernelConstraint are invalid for SeparableConv2D. Use depthwiseInitializer, depthwiseRegularizer, depthwiseConstraint, pointwiseInitializer, pointwiseRegularizer and pointwiseConstraint instead.");if(null!=r.padding&&"same"!==r.padding&&"valid"!==r.padding)throw new errors_ValueError(`SeparableConv${this.rank}D supports only padding modes: 'same' and 'valid', but received ${JSON.stringify(r.padding)}`);this.depthMultiplier=null==r.depthMultiplier?1:r.depthMultiplier,this.depthwiseInitializer=getInitializer(r.depthwiseInitializer||this.DEFAULT_DEPTHWISE_INITIALIZER),this.depthwiseRegularizer=getRegularizer(r.depthwiseRegularizer),this.depthwiseConstraint=getConstraint(r.depthwiseConstraint),this.pointwiseInitializer=getInitializer(r.depthwiseInitializer||this.DEFAULT_POINTWISE_INITIALIZER),this.pointwiseRegularizer=getRegularizer(r.pointwiseRegularizer),this.pointwiseConstraint=getConstraint(r.pointwiseConstraint)}build(t){if((t=getExactlyOneShape(t)).length{let r;if(t=getExactlyOneTensor(t),1===this.rank)throw new errors_NotImplementedError("1D separable convolution is not implemented yet.");return 
2===this.rank&&("channelsFirst"===this.dataFormat&&(t=iI(t,[0,2,3,1])),r=s7(t,this.depthwiseKernel.read(),this.pointwiseKernel.read(),this.strides,this.padding,this.dilationRate,"NHWC")),this.useBias&&(r=biasAdd(r,this.bias.read(),this.dataFormat)),null!=this.activation&&(r=this.activation.apply(r)),"channelsFirst"===this.dataFormat&&(r=iI(r,[0,3,1,2])),r})}getConfig(){let t=super.getConfig();return delete t.rank,delete t.kernelInitializer,delete t.kernelRegularizer,delete t.kernelConstraint,t.depthwiseInitializer=serializeKerasObject(this.depthwiseInitializer),t.pointwiseInitializer=serializeKerasObject(this.pointwiseInitializer),t.depthwiseRegularizer=serializeKerasObject(this.depthwiseRegularizer),t.pointwiseRegularizer=serializeKerasObject(this.pointwiseRegularizer),t.depthwiseConstraint=serializeKerasObject(this.depthwiseConstraint),t.pointwiseConstraint=serializeKerasObject(this.pointwiseConstraint),t}};SeparableConv.className="SeparableConv";let convolutional_SeparableConv2D=class convolutional_SeparableConv2D extends SeparableConv{constructor(t){super(2,t)}};convolutional_SeparableConv2D.className="SeparableConv2D",registerClass(convolutional_SeparableConv2D);let convolutional_Conv1D=class convolutional_Conv1D extends Conv{constructor(t){super(1,t),convolutional_Conv1D.verifyArgs(t),this.inputSpec=[{ndim:3}]}getConfig(){let t=super.getConfig();return delete t.rank,delete t.dataFormat,t}static verifyArgs(t){if("number"!=typeof t.kernelSize&&!checkArrayTypeAndLength(t.kernelSize,"number",1,1))throw new errors_ValueError(`Conv1D expects config.kernelSize to be number or number[] with length 1, but received ${JSON.stringify(t.kernelSize)}.`)}};convolutional_Conv1D.className="Conv1D",registerClass(convolutional_Conv1D);let convolutional_Cropping2D=class convolutional_Cropping2D extends Layer{constructor(t){super(t),"number"==typeof t.cropping?this.cropping=[[t.cropping,t.cropping],[t.cropping,t.cropping]]:"number"==typeof 
t.cropping[0]?this.cropping=[[t.cropping[0],t.cropping[0]],[t.cropping[1],t.cropping[1]]]:this.cropping=t.cropping,this.dataFormat=void 0===t.dataFormat?"channelsLast":t.dataFormat,this.inputSpec=[{ndim:4}]}computeOutputShape(t){return"channelsFirst"===this.dataFormat?[t[0],t[1],t[2]-this.cropping[0][0]-this.cropping[0][1],t[3]-this.cropping[1][0]-this.cropping[1][1]]:[t[0],t[1]-this.cropping[0][0]-this.cropping[0][1],t[2]-this.cropping[1][0]-this.cropping[1][1],t[3]]}call(t,r){return globals_tidy(()=>{if(t=getExactlyOneTensor(t),"channelsLast"===this.dataFormat){let r=sliceAlongAxis(t,this.cropping[0][0],t.shape[1]-this.cropping[0][0]-this.cropping[0][1],2);return sliceAlongAxis(r,this.cropping[1][0],t.shape[2]-this.cropping[1][1]-this.cropping[1][0],3)}{let r=sliceAlongAxis(t,this.cropping[0][0],t.shape[2]-this.cropping[0][0]-this.cropping[0][1],3);return sliceAlongAxis(r,this.cropping[1][0],t.shape[3]-this.cropping[1][1]-this.cropping[1][0],4)}})}getConfig(){let t={cropping:this.cropping,dataFormat:this.dataFormat};return Object.assign(t,super.getConfig()),t}};convolutional_Cropping2D.className="Cropping2D",registerClass(convolutional_Cropping2D);let convolutional_UpSampling2D=class convolutional_UpSampling2D extends Layer{constructor(t){super(t),this.DEFAULT_SIZE=[2,2],this.inputSpec=[{ndim:4}],this.size=null==t.size?this.DEFAULT_SIZE:t.size,this.dataFormat=null==t.dataFormat?"channelsLast":t.dataFormat,common_checkDataFormat(this.dataFormat),this.interpolation=null==t.interpolation?"nearest":t.interpolation,checkInterpolationFormat(this.interpolation)}computeOutputShape(t){if("channelsFirst"===this.dataFormat){let r=null==t[2]?null:this.size[0]*t[2],a=null==t[3]?null:this.size[1]*t[3];return[t[0],t[1],r,a]}{let r=null==t[1]?null:this.size[0]*t[1],a=null==t[2]?null:this.size[1]*t[2];return[t[0],r,a,t[3]]}}call(t,r){return globals_tidy(()=>{let r=getExactlyOneTensor(t),a=r.shape;if("channelsFirst"===this.dataFormat){r=iI(r,[0,2,3,1]);let 
t=this.size[0]*a[2],n=this.size[1]*a[3];return iI("nearest"===this.interpolation?om.resizeNearestNeighbor(r,[t,n]):om.resizeBilinear(r,[t,n]),[0,3,1,2])}{let t=this.size[0]*a[1],n=this.size[1]*a[2];return"nearest"===this.interpolation?om.resizeNearestNeighbor(r,[t,n]):om.resizeBilinear(r,[t,n])}})}getConfig(){let t={size:this.size,dataFormat:this.dataFormat,interpolation:this.interpolation};return Object.assign(t,super.getConfig()),t}};function convolutional_depthwise_depthwiseConv2d(t,r,a=[1,1],n="valid",s,i){return globals_tidy(()=>{null==s&&(s=imageDataFormat()),common_checkDataFormat(s);let o=preprocessConv2DInput(t,s);if(4!==t.rank)throw new errors_ValueError(`Input for depthwiseConv2d is required to be 4-D, but is instead ${t.rank}-D`);if(4!==r.rank)throw new errors_ValueError(`depthwiseKernel is required to be 4-D, but is instead ${r.rank}-D`);return o=nD(o,r,a,"same"===n?"same":"valid","NHWC",i),"channelsFirst"===s&&(o=iI(o,[0,3,1,2])),o})}convolutional_UpSampling2D.className="UpSampling2D",registerClass(convolutional_UpSampling2D);let convolutional_depthwise_DepthwiseConv2D=class convolutional_depthwise_DepthwiseConv2D extends BaseConv{constructor(t){super(2,t),this.depthwiseKernel=null,this.depthMultiplier=null==t.depthMultiplier?1:t.depthMultiplier,this.depthwiseInitializer=getInitializer(t.depthwiseInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.depthwiseConstraint=getConstraint(t.depthwiseConstraint),this.depthwiseRegularizer=getRegularizer(t.depthwiseRegularizer)}build(t){if((t=getExactlyOneShape(t)).length<4)throw new errors_ValueError(`Inputs to DepthwiseConv2D should have rank 4. 
Received input shape: ${JSON.stringify(t)}.`);let r="channelsFirst"===this.dataFormat?1:3;if(null==t[r]||t[r]<0)throw new errors_ValueError(`The channel dimension of the inputs to DepthwiseConv2D should be defined, but is not (${t[r]}).`);let a=t[r],n=[this.kernelSize[0],this.kernelSize[1],a,this.depthMultiplier];this.depthwiseKernel=this.addWeight("depthwise_kernel",n,null,this.depthwiseInitializer,this.depthwiseRegularizer,!0,this.depthwiseConstraint),this.useBias?this.bias=this.addWeight("bias",[a*this.depthMultiplier],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(t,r){return globals_tidy(()=>{let r=convolutional_depthwise_depthwiseConv2d(t=getExactlyOneTensor(t),this.depthwiseKernel.read(),this.strides,this.padding,this.dataFormat,null);return this.useBias&&(r=biasAdd(r,this.bias.read(),this.dataFormat)),null!=this.activation&&(r=this.activation.apply(r)),r})}computeOutputShape(t){t=getExactlyOneShape(t);let r="channelsFirst"===this.dataFormat?t[2]:t[1],a="channelsFirst"===this.dataFormat?t[3]:t[2],n="channelsFirst"===this.dataFormat?t[1]*this.depthMultiplier:t[3]*this.depthMultiplier,s=convOutputLength(r,this.kernelSize[0],this.padding,this.strides[0]),i=convOutputLength(a,this.kernelSize[1],this.padding,this.strides[1]);return"channelsFirst"===this.dataFormat?[t[0],n,s,i]:[t[0],s,i,n]}getConfig(){let t=super.getConfig();return t.depthMultiplier=this.depthMultiplier,t.depthwiseInitializer=serializeKerasObject(this.depthwiseInitializer),t.depthwiseRegularizer=serializeKerasObject(this.depthwiseRegularizer),t.depthwiseConstraint=serializeKerasObject(this.depthwiseRegularizer),t}};function standardizeArgs(t,r,a,n){if(Array.isArray(t)){if(null!=r||null!=a)throw new errors_ValueError("When inputs is an array, neither initialState or constants should be provided");null!=n&&(a=t.slice(t.length-n,t.length),t=t.slice(0,t.length-n)),t.length>1&&(r=t.slice(1,t.length)),t=t[0]}function toListOrNull(t){return 
null==t||Array.isArray(t)?t:[t]}return{inputs:t,initialState:r=toListOrNull(r),constants:a=toListOrNull(a)}}function rnn(t,r,a,n=!1,s,i,o=!1,l=!1){return globals_tidy(()=>{let u,p,m,y=r.shape.length;if(y<3)throw new errors_ValueError(`Input should be at least 3D, but is ${y}D.`);let _=[1,0].concat(math_utils_range(2,y));if(r=iI(r,_),null!=i)throw new errors_NotImplementedError("The rnn() functoin of the deeplearn.js backend does not support constants yet.");o&&console.warn("Backend rnn(): the unroll = true option is not applicable to the imperative deeplearn.js backend."),null!=s&&((s=aE(aE(s,"bool"),"float32")).rank===y-1&&(s=nZ(s,-1)),s=iI(s,_)),n&&(r=s0(r,0),null!=s&&(s=s0(s,0)));let w=[],I=a,C=r.shape[0],E=iw(r);null!=s&&(p=iw(s));for(let r=0;rt(a,I));if(null==s)u=n[0],I=n[1];else{let t=globals_tidy(()=>{let t=p[r],a=aB(sE(t),t);return{output:a$(aD(n[0],t),aD(I[0],a)),newStates:I.map((r,s)=>a$(aD(n[1][s],t),aD(r,a)))}});u=t.output,I=t.newStates}l&&w.push(u)}return l&&(m=ig(w,1)),[u,m,I]})}convolutional_depthwise_DepthwiseConv2D.className="DepthwiseConv2D",registerClass(convolutional_depthwise_DepthwiseConv2D);let recurrent_RNN=class recurrent_RNN extends Layer{constructor(t){let r;if(super(t),null==t.cell)throw new errors_ValueError("cell property is missing for the constructor of RNN.");if(null==(r=Array.isArray(t.cell)?new recurrent_StackedRNNCells({cells:t.cell}):t.cell).stateSize)throw new errors_ValueError("The RNN cell should have an attribute `stateSize` (tuple of integers, one integer per RNN state).");this.cell=r,this.returnSequences=null!=t.returnSequences&&t.returnSequences,this.returnState=null!=t.returnState&&t.returnState,this.goBackwards=null!=t.goBackwards&&t.goBackwards,this._stateful=null!=t.stateful&&t.stateful,this.unroll=null!=t.unroll&&t.unroll,this.supportsMasking=!0,this.inputSpec=[new InputSpec({ndim:3})],this.stateSpec=null,this.states_=null,this.numConstants=null,this.keptStates=[]}getStates(){return 
null==this.states_?math_utils_range(0,Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1).map(t=>null):this.states_}setStates(t){this.states_=t}computeOutputShape(t){let r;isArrayOfShapes(t)&&(t=t[0]);let a=this.cell.stateSize;Array.isArray(a)||(a=[a]);let n=a[0];if(r=this.returnSequences?[t[0],t[1],n]:[t[0],n],!this.returnState)return r;{let n=[];for(let r of a)n.push([t[0],r]);return[r].concat(n)}}computeMask(t,r){return globals_tidy(()=>{Array.isArray(r)&&(r=r[0]);let t=this.returnSequences?r:null;return this.returnState?[t].concat(this.states.map(t=>null)):t})}get states(){if(null!=this.states_)return this.states_;{let t=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1,r=[];for(let a=0;at.shape[t.shape.length-1]),r))throw new errors_ValueError(`An initialState was passed that is not compatible with cell.stateSize. Received stateSpec=${this.stateSpec}; However cell.stateSize is ${this.cell.stateSize}`)}else this.stateSpec=r.map(t=>new InputSpec({shape:[null,t]}));this.stateful&&this.resetStates()}resetStates(t,r=!1){globals_tidy(()=>{if(!this.stateful)throw new AttributeError("Cannot call resetStates() on an RNN Layer that is not stateful.");let a=this.inputSpec[0].shape[0];if(null==a)throw new errors_ValueError("If an RNN is stateful, it needs to know its batch size. 
Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(null==this.states_)Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(t=>zeros([a,t])):this.states_=[zeros([a,this.cell.stateSize])];else if(null==t)globals_dispose(this.states_),null!=this.keptStates&&(globals_dispose(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(t=>zeros([a,t])):this.states_[0]=zeros([a,this.cell.stateSize]);else{if(Array.isArray(t)||(t=[t]),t.length!==this.states_.length)throw new errors_ValueError(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${t.length} state value(s). Input received: ${t}`);!0===r?this.keptStates.push(this.states_.slice()):globals_dispose(this.states_);for(let r=0;rkeep(t.clone()))})}apply(t,r){let a=null==r?null:r.initialState,n=null==r?null:r.constants;null==r&&(r={});let s=standardizeArgs(t,a,n,this.numConstants);t=s.inputs,a=s.initialState,n=s.constants;let i=[],o=[];if(null!=a){for(let t of(r.initialState=a,i=i.concat(a),this.stateSpec=[],a))this.stateSpec.push(new InputSpec({shape:t.shape}));o=o.concat(this.stateSpec)}if(null!=n&&(r.constants=n,i=i.concat(n),this.numConstants=n.length),!(i[0]instanceof SymbolicTensor))return super.apply(t,r);{let a=[t].concat(i),n=this.inputSpec.concat(o),s=this.inputSpec;this.inputSpec=n;let l=super.apply(a,r);return this.inputSpec=s,l}}call(t,r){return globals_tidy(()=>{let a=null==r?null:r.mask,n=null==r?null:r.training,s=null==r?null:r.initialState;t=getExactlyOneTensor(t),null==s&&(s=this.stateful?this.states_:this.getInitialState(t));let i=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1;if(s.length!==i)throw new errors_ValueError(`RNN Layer has ${i} state(s) but was passed 
${s.length} initial state(s).`);this.unroll&&console.warn("Ignoring unroll = true for RNN layer, due to imperative backend.");let o={training:n},l=rnn((t,r)=>{let a=this.cell.call([t].concat(r),o);return[a[0],a.slice(1)]},t,s,this.goBackwards,a,null,this.unroll,this.returnSequences),u=l[0],p=l[1],m=l[2];this.stateful&&this.resetStates(m,n);let y=this.returnSequences?p:u;return this.returnState?[y].concat(m):y})}getInitialState(t){return globals_tidy(()=>{let r=zeros(t.shape);return(r=tfjs_backend_expandDims(r=nH(r,[1,2])),Array.isArray(this.cell.stateSize))?this.cell.stateSize.map(t=>t>1?tfjs_backend_tile(r,[1,t]):r):this.cell.stateSize>1?[tfjs_backend_tile(r,[1,this.cell.stateSize])]:[r]})}get trainableWeights(){return this.trainable?this.cell.trainableWeights:[]}get nonTrainableWeights(){return this.trainable?this.cell.nonTrainableWeights:this.cell.weights}setFastWeightInitDuringBuild(t){super.setFastWeightInitDuringBuild(t),null!=this.cell&&this.cell.setFastWeightInitDuringBuild(t)}getConfig(){let t=super.getConfig(),r={returnSequences:this.returnSequences,returnState:this.returnState,goBackwards:this.goBackwards,stateful:this.stateful,unroll:this.unroll};null!=this.numConstants&&(r.numConstants=this.numConstants);let a=this.cell.getConfig();return this.getClassName()===recurrent_RNN.className&&(r.cell={className:this.cell.getClassName(),config:a}),Object.assign(Object.assign(Object.assign({},a),t),r)}static fromConfig(t,r,a={}){let n=serialization_deserialize(r.cell,a);return new t(Object.assign(r,{cell:n}))}};recurrent_RNN.className="RNN",registerClass(recurrent_RNN);let recurrent_RNNCell=class recurrent_RNNCell extends Layer{};let recurrent_SimpleRNNCell=class recurrent_SimpleRNNCell extends 
recurrent_RNNCell{constructor(t){super(t),this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=t.units,assertPositiveInteger(this.units,"units"),this.activation=getActivation(null==t.activation?this.DEFAULT_ACTIVATION:t.activation),this.useBias=null==t.useBias||t.useBias,this.kernelInitializer=getInitializer(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=getInitializer(t.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=getInitializer(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=getRegularizer(t.kernelRegularizer),this.recurrentRegularizer=getRegularizer(t.recurrentRegularizer),this.biasRegularizer=getRegularizer(t.biasRegularizer),this.kernelConstraint=getConstraint(t.kernelConstraint),this.recurrentConstraint=getConstraint(t.recurrentConstraint),this.biasConstraint=getConstraint(t.biasConstraint),this.dropout=math_utils_min([1,math_utils_max([0,null==t.dropout?0:t.dropout])]),this.recurrentDropout=math_utils_min([1,math_utils_max([0,null==t.recurrentDropout?0:t.recurrentDropout])]),this.dropoutFunc=t.dropoutFunc,this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(t){t=getExactlyOneShape(t),this.kernel=this.addWeight("kernel",[t[t.length-1],this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(t,r){return globals_tidy(()=>{let a;if(2!==t.length)throw new errors_ValueError(`SimpleRNNCell expects 2 input Tensors, got ${t.length}.`);let n=t[1];t=t[0];let 
s=null!=r.training&&r.training;0sE(t),rate:this.dropout,training:s,dropoutFunc:this.dropoutFunc})),0sE(n),rate:this.recurrentDropout,training:s,dropoutFunc:this.dropoutFunc}));let i=this.dropoutMask,o=this.recurrentDropoutMask;a=null!=i?tfjs_backend_dot(aD(t,i),this.kernel.read()):tfjs_backend_dot(t,this.kernel.read()),null!=this.bias&&(a=biasAdd(a,this.bias.read())),null!=o&&(n=aD(n,o));let l=a$(a,tfjs_backend_dot(n,this.recurrentKernel.read()));return null!=this.activation&&(l=this.activation.apply(l)),[l,l]})}getConfig(){let t=super.getConfig(),r={units:this.units,activation:serializeActivation(this.activation),useBias:this.useBias,kernelInitializer:serializeKerasObject(this.kernelInitializer),recurrentInitializer:serializeKerasObject(this.recurrentInitializer),biasInitializer:serializeKerasObject(this.biasInitializer),kernelRegularizer:serializeKerasObject(this.kernelRegularizer),recurrentRegularizer:serializeKerasObject(this.recurrentRegularizer),biasRegularizer:serializeKerasObject(this.biasRegularizer),activityRegularizer:serializeKerasObject(this.activityRegularizer),kernelConstraint:serializeKerasObject(this.kernelConstraint),recurrentConstraint:serializeKerasObject(this.recurrentConstraint),biasConstraint:serializeKerasObject(this.biasConstraint),dropout:this.dropout,recurrentDropout:this.recurrentDropout};return Object.assign(Object.assign({},t),r)}};recurrent_SimpleRNNCell.className="SimpleRNNCell",registerClass(recurrent_SimpleRNNCell);let recurrent_SimpleRNN=class recurrent_SimpleRNN extends recurrent_RNN{constructor(t){t.cell=new recurrent_SimpleRNNCell(t),super(t)}call(t,r){return globals_tidy(()=>{null!=this.cell.dropoutMask&&(globals_dispose(this.cell.dropoutMask),this.cell.dropoutMask=null),null!=this.cell.recurrentDropoutMask&&(globals_dispose(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);let a=null==r?null:r.mask,n=null==r?null:r.training,s=null==r?null:r.initialState;return 
super.call(t,{mask:a,training:n,initialState:s})})}static fromConfig(t,r){return new t(r)}};recurrent_SimpleRNN.className="SimpleRNN",registerClass(recurrent_SimpleRNN);let recurrent_GRUCell=class recurrent_GRUCell extends recurrent_RNNCell{constructor(t){if(super(t),this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",t.resetAfter)throw new errors_ValueError("GRUCell does not support reset_after parameter set to true.");this.units=t.units,assertPositiveInteger(this.units,"units"),this.activation=getActivation(void 0===t.activation?this.DEFAULT_ACTIVATION:t.activation),this.recurrentActivation=getActivation(void 0===t.recurrentActivation?this.DEFAULT_RECURRENT_ACTIVATION:t.recurrentActivation),this.useBias=null==t.useBias||t.useBias,this.kernelInitializer=getInitializer(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=getInitializer(t.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=getInitializer(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=getRegularizer(t.kernelRegularizer),this.recurrentRegularizer=getRegularizer(t.recurrentRegularizer),this.biasRegularizer=getRegularizer(t.biasRegularizer),this.kernelConstraint=getConstraint(t.kernelConstraint),this.recurrentConstraint=getConstraint(t.recurrentConstraint),this.biasConstraint=getConstraint(t.biasConstraint),this.dropout=math_utils_min([1,math_utils_max([0,null==t.dropout?0:t.dropout])]),this.recurrentDropout=math_utils_min([1,math_utils_max([0,null==t.recurrentDropout?0:t.recurrentDropout])]),this.dropoutFunc=t.dropoutFunc,this.implementation=t.implementation,this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(t){let 
r=(t=getExactlyOneShape(t))[t.length-1];this.kernel=this.addWeight("kernel",[r,3*this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,3*this.units],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[3*this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(t,r){return globals_tidy(()=>{let a,n;if(2!==t.length)throw new errors_ValueError(`GRUCell expects 2 input Tensors (inputs, h, c), got ${t.length}.`);let s=null!=r.training&&r.training,i=t[1];t=t[0],0sE(t),rate:this.dropout,training:s,count:3,dropoutFunc:this.dropoutFunc})),0sE(i),rate:this.recurrentDropout,training:s,count:3,dropoutFunc:this.dropoutFunc}));let o=this.dropoutMask,l=this.recurrentDropoutMask;0{null!=this.cell.dropoutMask&&(globals_dispose(this.cell.dropoutMask),this.cell.dropoutMask=null),null!=this.cell.recurrentDropoutMask&&(globals_dispose(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);let a=null==r?null:r.mask,n=null==r?null:r.training,s=null==r?null:r.initialState;return super.call(t,{mask:a,training:n,initialState:s})})}static fromConfig(t,r){return 0===r.implmentation&&(r.implementation=1),new t(r)}};recurrent_GRU.className="GRU",registerClass(recurrent_GRU);let recurrent_LSTMCell=class recurrent_LSTMCell extends recurrent_RNNCell{constructor(t){super(t),this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=t.units,assertPositiveInteger(this.units,"units"),this.activation=getActivation(void 0===t.activation?this.DEFAULT_ACTIVATION:t.activation),this.recurrentActivation=getActivation(void 
0===t.recurrentActivation?this.DEFAULT_RECURRENT_ACTIVATION:t.recurrentActivation),this.useBias=null==t.useBias||t.useBias,this.kernelInitializer=getInitializer(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=getInitializer(t.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=getInitializer(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.unitForgetBias=t.unitForgetBias,this.kernelRegularizer=getRegularizer(t.kernelRegularizer),this.recurrentRegularizer=getRegularizer(t.recurrentRegularizer),this.biasRegularizer=getRegularizer(t.biasRegularizer),this.kernelConstraint=getConstraint(t.kernelConstraint),this.recurrentConstraint=getConstraint(t.recurrentConstraint),this.biasConstraint=getConstraint(t.biasConstraint),this.dropout=math_utils_min([1,math_utils_max([0,null==t.dropout?0:t.dropout])]),this.recurrentDropout=math_utils_min([1,math_utils_max([0,null==t.recurrentDropout?0:t.recurrentDropout])]),this.dropoutFunc=t.dropoutFunc,this.implementation=t.implementation,this.stateSize=[this.units,this.units],this.dropoutMask=null,this.recurrentDropoutMask=null}build(t){var r;let a,n=(t=getExactlyOneShape(t))[t.length-1];if(this.kernel=this.addWeight("kernel",[n,4*this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,4*this.units],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias){if(this.unitForgetBias){let t=this.biasInitializer,n=this.units;a=new((r=class extends Initializer{apply(r,a){let s=t.apply([n]),i=new initializers_Ones().apply([n]),o=t.apply([2*n]);return concatAlongFirstAxis(concatAlongFirstAxis(s,i),o)}}).className="CustomInit",r)}else a=this.biasInitializer;this.bias=this.addWeight("bias",[4*this.units],null,a,this.biasRegularizer,!0,this.biasConstraint)}else this.bias=null;this.built=!0}call(t,r){return globals_tidy(()=>{let 
a,n,s=null!=r.training&&r.training;if(3!==t.length)throw new errors_ValueError(`LSTMCell expects 3 input Tensors (inputs, h, c), got ${t.length}.`);let i=t[1],o=t[2];t=t[0],0sE(t),rate:this.dropout,training:s,count:4,dropoutFunc:this.dropoutFunc})),0sE(i),rate:this.recurrentDropout,training:s,count:4,dropoutFunc:this.dropoutFunc}));let l=this.dropoutMask,u=this.recurrentDropoutMask;0{null!=this.cell.dropoutMask&&(globals_dispose(this.cell.dropoutMask),this.cell.dropoutMask=null),null!=this.cell.recurrentDropoutMask&&(globals_dispose(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);let a=null==r?null:r.mask,n=null==r?null:r.training,s=null==r?null:r.initialState;return super.call(t,{mask:a,training:n,initialState:s})})}static fromConfig(t,r){return 0===r.implmentation&&(r.implementation=1),new t(r)}};recurrent_LSTM.className="LSTM",registerClass(recurrent_LSTM);let recurrent_StackedRNNCells=class recurrent_StackedRNNCells extends recurrent_RNNCell{constructor(t){super(t),this.cells=t.cells}get stateSize(){let t=[];for(let r of this.cells.slice().reverse())Array.isArray(r.stateSize)?t.push(...r.stateSize):t.push(r.stateSize);return t}call(t,r){return globals_tidy(()=>{let a,n=t.slice(1),s=[];for(let t of this.cells.slice().reverse())Array.isArray(t.stateSize)?s.push(n.splice(0,t.stateSize.length)):s.push(n.splice(0,1));s.reverse();let i=[];for(let o=0;o{nameScope(`RNNCell_${n}`,()=>{a.build(t),r=Array.isArray(a.stateSize)?a.stateSize[0]:a.stateSize,t=[t[0],r]})}),this.built=!0}getConfig(){let t=super.getConfig(),r=this.cells.map(t=>({className:t.getClassName(),config:t.getConfig()}));return Object.assign(Object.assign({},t),{cells:r})}static fromConfig(t,r,a={}){let n=[];for(let t of r.cells)n.push(serialization_deserialize(t,a));return new t({cells:n})}get trainableWeights(){if(!this.trainable)return[];let t=[];for(let r of this.cells)t.push(...r.trainableWeights);return t}get nonTrainableWeights(){let t=[];for(let r of 
this.cells)t.push(...r.nonTrainableWeights);if(!this.trainable){let r=[];for(let t of this.cells)r.push(...t.trainableWeights);return r.concat(t)}return t}getWeights(){let t=[];for(let r of this.cells)t.push(...r.weights);return batchGetValue(t)}setWeights(t){let r=[];for(let a of this.cells){let n=a.weights.length,s=t.splice(n);for(let t=0;t{var t;return null!=i?i(r(),a):(t=r(),globals_tidy(()=>i$(t,a,void 0,void 0)))},createMask=()=>inTrainPhase(droppedInputs,r,n);return!s||s<=1?keep(createMask().clone()):Array(s).fill(void 0).map(createMask).map(t=>keep(t.clone()))}recurrent_StackedRNNCells.className="StackedRNNCells",registerClass(recurrent_StackedRNNCells);var __rest=function(t,r){var a={};for(var n in t)Object.prototype.hasOwnProperty.call(t,n)&&0>r.indexOf(n)&&(a[n]=t[n]);if(null!=t&&"function"==typeof Object.getOwnPropertySymbols)for(var s=0,n=Object.getOwnPropertySymbols(t);sr.indexOf(n[s])&&Object.prototype.propertyIsEnumerable.call(t,n[s])&&(a[n[s]]=t[n[s]]);return a};let ConvRNN2D=class ConvRNN2D extends recurrent_RNN{constructor(t){if(t.unroll)throw new errors_NotImplementedError("Unrolling is not possible with convolutional RNNs.");if(Array.isArray(t.cell))throw new errors_NotImplementedError("It is not possible at the moment to stack convolutional cells.");super(t),this.inputSpec=[new InputSpec({ndim:5})]}call(t,r){return globals_tidy(()=>{if(null!=this.cell.dropoutMask&&(globals_dispose(this.cell.dropoutMask),this.cell.dropoutMask=null),null!=this.cell.recurrentDropoutMask&&(globals_dispose(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null),r&&r.constants)throw new errors_ValueError("ConvRNN2D cell does not support constants");let a=null==r?null:r.mask,n=null==r?null:r.training,s=null==r?null:r.initialState;return super.call(t,{mask:a,training:n,initialState:s})})}computeOutputShape(t){let r=this.computeSingleOutputShape(t);return 
this.returnSequences||(r=[r[0],...r.slice(2)]),this.returnState&&(r=[r,...[,,].fill([t[0],...r.slice(-3)])]),r}getInitialState(t){return globals_tidy(()=>{let{stateSize:r}=this.cell,a=t.shape,n=this.computeSingleOutputShape(a),s=zeros([n[0],...n.slice(2)]);return Array.isArray(r)?Array(r.length).fill(s):[s]})}resetStates(t,r=!1){globals_tidy(()=>{if(!this.stateful)throw new AttributeError("Cannot call resetStates() on an RNN Layer that is not stateful.");let a=this.inputSpec[0].shape,n=this.computeSingleOutputShape(a),s=[n[0],...n.slice(2)];if(null==a[0])throw new errors_ValueError("If an RNN is stateful, it needs to know its batch size. Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(null==this.getStates())Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(()=>zeros(s)):this.states_=[zeros(s)];else if(null==t)globals_dispose(this.states_),null!=this.keptStates&&(globals_dispose(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(()=>zeros(s)):this.states_[0]=zeros(s);else{if(Array.isArray(t)||(t=[t]),t.length!==this.states_.length)throw new errors_ValueError(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${t.length} state value(s). 
Input received: ${t}`);r?this.keptStates.push(this.states_.slice()):globals_dispose(this.states_);for(let r=0;rkeep(t.clone()))})}computeSingleOutputShape(t){let{dataFormat:r,filters:a,kernelSize:n,padding:s,strides:i,dilationRate:o}=this.cell,l="channelsFirst"===r,u=t[l?3:2],p=t[l?4:3],m=convOutputLength(u,n[0],s,i[0],o[0]),y=convOutputLength(p,n[1],s,i[1],o[1]);return[...t.slice(0,2),...l?[a,m,y]:[m,y,a]]}};ConvRNN2D.className="ConvRNN2D";let convolutional_recurrent_ConvLSTM2DCell=class convolutional_recurrent_ConvLSTM2DCell extends recurrent_LSTMCell{constructor(t){const{filters:r,kernelSize:a,strides:n,padding:s,dataFormat:i,dilationRate:o}=t;super(Object.assign(Object.assign({},t),{units:r})),this.filters=r,assertPositiveInteger(this.filters,"filters"),this.kernelSize=normalizeArray(a,2,"kernelSize"),this.kernelSize.forEach(t=>assertPositiveInteger(t,"kernelSize")),this.strides=normalizeArray(n||1,2,"strides"),this.strides.forEach(t=>assertPositiveInteger(t,"strides")),this.padding=s||"valid",checkPaddingMode(this.padding),this.dataFormat=i||"channelsLast",common_checkDataFormat(this.dataFormat),this.dilationRate=normalizeArray(o||1,2,"dilationRate"),this.dilationRate.forEach(t=>assertPositiveInteger(t,"dilationRate"))}build(t){var r;t=getExactlyOneShape(t);let a="channelsFirst"===this.dataFormat?1:t.length-1;if(null==t[a])throw new errors_ValueError(`The channel dimension of the input should be defined. 
Found ${t[a]}`);let n=t[a],s=this.kernelSize.concat([n,4*this.filters]);this.kernel=this.addWeight("kernel",s,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint);let i=this.kernelSize.concat([this.filters,4*this.filters]);if(this.recurrentKernel=this.addWeight("recurrent_kernel",i,null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias){let t;if(this.unitForgetBias){let a=this.biasInitializer,n=this.filters;t=new((r=class extends Initializer{apply(t,r){return concatenate([a.apply([n]),ones_ones([n]),a.apply([2*n])])}}).className="CustomInit",r)}else t=this.biasInitializer;this.bias=this.addWeight("bias",[4*this.filters],null,t,this.biasRegularizer,!0,this.biasConstraint)}this.built=!0}call(t,r){return globals_tidy(()=>{if(3!==t.length)throw new errors_ValueError(`ConvLSTM2DCell expects 3 input Tensors (inputs, h, c), got ${t.length}.`);let a=r.training||!1,n=t[0],s=t[1],i=t[2];0sE(n),rate:this.dropout,training:a,count:4,dropoutFunc:this.dropoutFunc}));let o=this.dropoutMask,applyDropout=(t,r,a)=>r&&r[a]?aD(r[a],t):t,l=applyDropout(n,o,0),u=applyDropout(n,o,1),p=applyDropout(n,o,2),m=applyDropout(n,o,3);0sE(s),rate:this.recurrentDropout,training:a,count:4,dropoutFunc:this.dropoutFunc}));let y=this.recurrentDropoutMask,_=applyDropout(s,y,0),w=applyDropout(s,y,1),I=applyDropout(s,y,2),C=applyDropout(s,y,3),[E,A,$,F]=ih(this.kernel.read(),4,3),[D,P,L,z]=this.useBias?ih(this.bias.read(),4):[null,null,null,null];l=this.inputConv(l,E,D,this.padding),u=this.inputConv(u,A,P,this.padding),p=this.inputConv(p,$,L,this.padding),m=this.inputConv(m,F,z,this.padding);let[B,G,j,K]=ih(this.recurrentKernel.read(),4,3);_=this.recurrentConv(_,B),w=this.recurrentConv(w,G),I=this.recurrentConv(I,j),C=this.recurrentConv(C,K);let 
H=this.recurrentActivation.apply(a$(l,_)),q=this.recurrentActivation.apply(a$(u,w)),Z=a$(aD(q,i),aD(H,this.activation.apply(a$(p,I)))),Q=aD(this.recurrentActivation.apply(a$(m,C)),this.activation.apply(Z));return[Q,Q,Z]})}getConfig(){let t=super.getConfig(),{units:r}=t,a=__rest(t,["units"]),n={filters:this.filters,kernelSize:this.kernelSize,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,strides:this.strides};return Object.assign(Object.assign({},a),n)}inputConv(t,r,a,n){let s=n_(t,r,this.strides,n||"valid","channelsFirst"===this.dataFormat?"NCHW":"NHWC",this.dilationRate);return a?biasAdd(s,a,this.dataFormat):s}recurrentConv(t,r){return n_(t,r,1,"same","channelsFirst"===this.dataFormat?"NCHW":"NHWC")}};convolutional_recurrent_ConvLSTM2DCell.className="ConvLSTM2DCell",registerClass(convolutional_recurrent_ConvLSTM2DCell);let convolutional_recurrent_ConvLSTM2D=class convolutional_recurrent_ConvLSTM2D extends ConvRNN2D{constructor(t){const r=new convolutional_recurrent_ConvLSTM2DCell(t);super(Object.assign(Object.assign({},t),{cell:r}))}static fromConfig(t,r){return new t(r)}};convolutional_recurrent_ConvLSTM2D.className="ConvLSTM2D",registerClass(convolutional_recurrent_ConvLSTM2D);let core_Dropout=class core_Dropout extends Layer{constructor(t){super(t),this.rate=Math.max(Math.min(t.rate,1),0),this.noiseShape=t.noiseShape,this.seed=t.seed,this.supportsMasking=!0}getNoiseShape(t){if(null==this.noiseShape)return this.noiseShape;let r=t.shape,a=[];for(let t=0;t{this.invokeCallHook(t,r);let a=getExactlyOneTensor(t);if(0{var t,r;return t=this.rate,r=this.seed,globals_tidy(()=>i$(a,t,n,r))},()=>a,t)}return t})}getConfig(){let t={rate:this.rate,noiseShape:this.noiseShape,seed:this.seed};return Object.assign(t,super.getConfig()),t}dispose(){return super.dispose()}};core_Dropout.className="Dropout",registerClass(core_Dropout);let core_SpatialDropout1D=class core_SpatialDropout1D extends 
core_Dropout{constructor(t){super(t),this.inputSpec=[{ndim:3}]}getNoiseShape(t){let r=t.shape;return[r[0],1,r[2]]}};core_SpatialDropout1D.className="SpatialDropout1D",registerClass(core_SpatialDropout1D);let core_Dense=class core_Dense extends Layer{constructor(t){if(super(t),this.activation=null,this.useBias=!0,this.kernel=null,this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",null==t.batchInputShape&&null==t.inputShape&&null!=t.inputDim){let r=null;null!=t.batchSize&&(r=t.batchSize),this.batchInputShape=[r,t.inputDim]}this.units=t.units,assertPositiveInteger(this.units,"units"),this.activation=getActivation(t.activation),null!=t.useBias&&(this.useBias=t.useBias),this.kernelInitializer=getInitializer(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.biasInitializer=getInitializer(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelConstraint=getConstraint(t.kernelConstraint),this.biasConstraint=getConstraint(t.biasConstraint),this.kernelRegularizer=getRegularizer(t.kernelRegularizer),this.biasRegularizer=getRegularizer(t.biasRegularizer),this.activityRegularizer=getRegularizer(t.activityRegularizer),this.supportsMasking=!0,this.inputSpec=[{minNDim:2}]}build(t){let r=(t=getExactlyOneShape(t))[t.length-1];null==this.kernel&&(this.kernel=this.addWeight("kernel",[r,this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint))),this.inputSpec=[{minNDim:2,axes:{[-1]:r}}],this.built=!0}computeOutputShape(t){let r=(t=getExactlyOneShape(t)).slice();return r[r.length-1]=this.units,r}call(t,r){return globals_tidy(()=>{let a;this.invokeCallHook(t,r);let n=getExactlyOneTensor(t),s=mapActivationToFusedKernel(this.activation.getClassName());return 
null!=s?a=tfjs_backend_dot(n,this.kernel.read(),s,this.bias?this.bias.read():null):(a=tfjs_backend_dot(n,this.kernel.read()),null!=this.bias&&(a=biasAdd(a,this.bias.read())),null!=this.activation&&(a=this.activation.apply(a))),a})}getConfig(){let t={units:this.units,activation:serializeActivation(this.activation),useBias:this.useBias,kernelInitializer:serializeKerasObject(this.kernelInitializer),biasInitializer:serializeKerasObject(this.biasInitializer),kernelRegularizer:serializeKerasObject(this.kernelRegularizer),biasRegularizer:serializeKerasObject(this.biasRegularizer),activityRegularizer:serializeKerasObject(this.activityRegularizer),kernelConstraint:serializeKerasObject(this.kernelConstraint),biasConstraint:serializeKerasObject(this.biasConstraint)};return Object.assign(t,super.getConfig()),t}};core_Dense.className="Dense",registerClass(core_Dense);let core_Flatten=class core_Flatten extends Layer{constructor(t){super(t=t||{}),this.inputSpec=[{minNDim:3}],this.dataFormat=t.dataFormat}computeOutputShape(t){for(let r of(t=getExactlyOneShape(t)).slice(1))if(null==r)throw new errors_ValueError(`The shape of the input to "Flatten" is not fully defined (got ${t.slice(1)}). 
Make sure to pass a complete "input_shape" or "batch_input_shape" argument to the first layer in your model.`);return[t[0],arrayProd(t,1)]}call(t,r){return globals_tidy(()=>{this.invokeCallHook(t,r);let a=getExactlyOneTensor(t);if("channelsFirst"===this.dataFormat&&a.rank>1){let t=[0];for(let r=2;r{this.invokeCallHook(t,r);let a=getExactlyOneTensor(t);return this.activation.apply(a)})}getConfig(){let t={activation:serializeActivation(this.activation)};return Object.assign(t,super.getConfig()),t}};core_Activation.className="Activation",registerClass(core_Activation);let core_RepeatVector=class core_RepeatVector extends Layer{constructor(t){super(t),this.n=t.n,this.inputSpec=[{ndim:2}]}computeOutputShape(t){return[t[0],this.n,t[1]]}call(t,r){return globals_tidy(()=>repeat(t=getExactlyOneTensor(t),this.n))}getConfig(){let t={n:this.n};return Object.assign(t,super.getConfig()),t}};core_RepeatVector.className="RepeatVector",registerClass(core_RepeatVector);let core_Reshape=class core_Reshape extends Layer{constructor(t){super(t),this.targetShape=t.targetShape;for(let t=0;t{this.invokeCallHook(t,r);let a=getExactlyOneTensor(t),n=a.shape;return a6(a,n.slice(0,1).concat(this.fixUnknownDimension(n.slice(1),this.targetShape)))})}getConfig(){let t={targetShape:this.targetShape};return Object.assign(t,super.getConfig()),t}};core_Reshape.className="Reshape",registerClass(core_Reshape);let core_Permute=class core_Permute extends Layer{constructor(t){if(super(t),null==t.dims)throw Error("Required configuration field `dims` is missing during Permute constructor call.");if(!Array.isArray(t.dims))throw Error(`Permute constructor requires \`dims\` to be an Array, but received ${t.dims} instead.`);const r=math_utils_range(1,t.dims.length+1);if(!arraysEqual(t.dims.slice().sort(),r))throw Error("Invalid permutation `dims`: "+JSON.stringify(t.dims)+" `dims` must contain consecutive integers starting from 
1.");this.dims=t.dims,this.dimsIncludingBatch=[0].concat(this.dims),this.inputSpec=[new InputSpec({ndim:this.dims.length+1})]}computeOutputShape(t){let r=(t=getExactlyOneShape(t)).slice();return this.dims.forEach((a,n)=>{r[n+1]=t[a]}),r}call(t,r){return iI(getExactlyOneTensor(t),this.dimsIncludingBatch)}getConfig(){let t={dims:this.dims};return Object.assign(t,super.getConfig()),t}};core_Permute.className="Permute",registerClass(core_Permute);let core_Masking=class core_Masking extends Layer{constructor(t){super(null==t?{}:t),this.supportsMasking=!0,null!=t?this.maskValue=null==t.maskValue?0:t.maskValue:this.maskValue=0}computeOutputShape(t){return t}getConfig(){let t=super.getConfig(),r={maskValue:this.maskValue};return Object.assign(r,t),r}computeMask(t,r){return aZ(sN(getExactlyOneTensor(t),this.maskValue),-1)}call(t,r){return globals_tidy(()=>{this.invokeCallHook(t,r);let a=getExactlyOneTensor(t),n=aZ(sN(a,this.maskValue),-1,!0);return aD(a,aE(n,a.dtype))})}};core_Masking.className="Masking",registerClass(core_Masking);let embeddings_Embedding=class embeddings_Embedding extends Layer{constructor(t){if(super(t),this.embeddings=null,this.DEFAULT_EMBEDDINGS_INITIALIZER="randomUniform",null==t.batchInputShape&&null==t.inputShape){let 
r=null;null!=t.batchSize&&(r=t.batchSize),null==t.inputLength?this.batchInputShape=[r,null]:this.batchInputShape=[r].concat(toList(t.inputLength))}this.inputDim=t.inputDim,assertPositiveInteger(this.inputDim,"inputDim"),this.outputDim=t.outputDim,assertPositiveInteger(this.outputDim,"outputDim"),this.embeddingsInitializer=getInitializer(t.embeddingsInitializer||this.DEFAULT_EMBEDDINGS_INITIALIZER),this.embeddingsRegularizer=getRegularizer(t.embeddingsRegularizer),this.activityRegularizer=getRegularizer(t.activityRegularizer),this.embeddingsConstraint=getConstraint(t.embeddingsConstraint),this.maskZero=t.maskZero,this.supportsMasking=t.maskZero,this.inputLength=t.inputLength}build(t){this.embeddings=this.addWeight("embeddings",[this.inputDim,this.outputDim],this.dtype,this.embeddingsInitializer,this.embeddingsRegularizer,!0,this.embeddingsConstraint),this.built=!0}warnOnIncompatibleInputShape(t){}computeMask(t,r){return globals_tidy(()=>this.maskZero?sN(t=getExactlyOneTensor(t),aM(t)):null)}computeOutputShape(t){if(t=getExactlyOneShape(t),null==this.inputLength)return[...t,this.outputDim];let r=toList(this.inputLength);if(r.length!==t.length-1)throw new errors_ValueError(`"inputLength" is ${this.inputLength}, but received input shape has shape ${t}`);{let a=0;for(let n=0;n{this.invokeCallHook(t,r);let a=getExactlyOneTensor(t);"int32"!==a.dtype&&(a=aE(a,"int32"));let n=tfjs_backend_gather(this.embeddings.read(),a6(a,[a.size]));return a6(n,getExactlyOneShape(this.computeOutputShape(a.shape)))})}getConfig(){let t={inputDim:this.inputDim,outputDim:this.outputDim,embeddingsInitializer:serializeKerasObject(this.embeddingsInitializer),embeddingsRegularizer:serializeKerasObject(this.embeddingsRegularizer),activityRegularizer:serializeKerasObject(this.activityRegularizer),embeddingsConstraint:serializeKerasObject(this.embeddingsConstraint),maskZero:this.maskZero,inputLength:this.inputLength};return 
Object.assign(t,super.getConfig()),t}};embeddings_Embedding.className="Embedding",registerClass(embeddings_Embedding);let Merge=class Merge extends Layer{constructor(t){super(t||{}),this.supportsMasking=!0}mergeFunction(t){throw new errors_NotImplementedError}computeElementwiseOpOutputShape(t,r){if(null==t||null==r)return null;if(t.length1)throw new errors_ValueError(`Can not merge tensors with different batch sizes. Got tensors with shapes: ${JSON.stringify(t)}.`);let a=null==t[0]?null:t[0].slice(1);for(let r=1;rt.length);-1===t.indexOf(null)&&1===generic_utils_unique(n).length?this.reshapeRequired=!1:this.reshapeRequired=!0}call(t,r){return globals_tidy(()=>{if(!this.reshapeRequired)return this.mergeFunction(t);{let r=[],a=t.map(t=>t.rank);if(-1===a.indexOf(null)){let n=math_utils_max(a);for(let a of t){let t=a.rank;for(let r=0;r1){let s=math_utils_range(1,t).concat([0]);r.push(iI(n,s)),a=!0}else r.push(n)}let n=this.mergeFunction(r),s=n.rank;if(a)if(null==s){let t=n.shape,r=t.length,a=t[r-1],s=[a].concat(t.slice(0,t.length-1));n=a6(iI(a6(n,[-1,a]),[1,0]),s)}else s>1&&(n=iI(n,[s-1].concat(math_utils_range(0,s-1))));return n}}})}computeOutputShape(t){let r;r=null==t[0]?null:t[0].slice(1);for(let a=1;a{if(null==r)return null;if(!Array.isArray(r))throw new errors_ValueError("`mask` should be an Array");if(!Array.isArray(t))throw new errors_ValueError("`inputs` should be an Array");if(r.length!==t.length)throw new errors_ValueError(`The Array 'inputs' and 'mask' are expected to have the same length, but have different lengths (${t.length} vs ${r.length})`);if(r.every(t=>null==t))return null;let a=(r=r.map(t=>null==t?t:nZ(t,0)))[0];for(let t=1;t{let r=t[0].clone();for(let a=1;a{let r=t[0].clone();for(let a=1;a{let r=t[0].clone();for(let a=1;a{let r=t[0];for(let a=1;a{let r=t[0];for(let a=1;a1)throw new errors_ValueError("A `Concatenate` layer requires inputs with matching shapes except for the concat axis. 
Got input shapes: "+JSON.stringify(t))}mergeFunction(t){return globals_tidy(()=>concatenate(t,this.axis))}computeOutputShape(t){if(!(Array.isArray(t)&&Array.isArray(t[0])))throw new errors_ValueError("A `Concatenate` layer should be called on a list of inputs.");let r=t[0].slice(),a=this.axis<0?r.length+this.axis:this.axis;for(let n of t.slice(1)){if(null==r[a]||null==n[a]){r[a]=null;break}r[a]+=n[a]}return r}computeMask(t,r){if(null==r)return null;if(!Array.isArray(r))throw new errors_ValueError("`mask` should be an array for Concatenate");if(!Array.isArray(t))throw new errors_ValueError("`inputs` should be an array for Concatenate");if(r.length!==t.length)throw new errors_ValueError(`Mismatch in the length of mask (${r.length}) and the legnth of inputs (${t.length})`);return globals_tidy(()=>{let a=!0;if(r.forEach(t=>{if(null!=t){a=!1;return}}),a)return null;let n=[];for(let a=0;a3||r.shape.length>3)throw new errors_NotImplementedError("batchDot is not implemented for tensors of 4D or higher rank yet");if(assert(t.shape.length>=2,()=>`batchDot requires the rank of x to be >= 2, but got ${t.shape.length}`),assert(t.shape.length>=2,()=>`batchDot requires the rank of y to be >= 2, but got ${r.shape.length}`),"number"==typeof a&&(a=[a,a]),"complex64"===t.dtype||"complex64"===r.dtype)throw new errors_NotImplementedError("batchDot is not implemented for complex64-type Tensors yet.");let n=t.shape.length,s=r.shape.length;null==a&&(a=[n-1,s-2]);let i=a;return globals_tidy(()=>{let a,o;if(n>s){a=n-s;let t=[];for(let r=0;rn){a=s-n;let r=[];for(let t=0;t0){let t;t=n>s?n+s-3:n-1;let r=[];for(let n=t;n"A `Dot` layer should be called on a list of exactly 2 inputs.");let r=t[0],a=t[1];if(r.length>3||a.length>3)throw new errors_NotImplementedError("Dot layer does not support tensors of 4D or higher rank yet.");let n=this.interpretAxes(r,a);if(r[n[0]]!==a[n[1]])throw new errors_ValueError(`Dimension incompatibility: ${r[n[0]]} !== ${a[n[1]]}`)}mergeFunction(t){let 
r;if(2!==t.length)throw new errors_ValueError(`A \`Dot\` layer must be called on exactly 2 inputs, but received ${t.length} input(s).`);let a=t[0],n=t[1];return r=Array.isArray(this.axes)?this.axes.map((r,a)=>interpretAxis(r,t[a].shape.length)):[interpretAxis(this.axes,a.shape.length),interpretAxis(this.axes,n.shape.length)],this.normalize&&(a=l2Normalize(a,r[0]),n=l2Normalize(n,r[1])),batchDot(a,n,r)}interpretAxes(t,r){return Array.isArray(this.axes)?this.axes:[interpretAxis(this.axes,t.length),interpretAxis(this.axes,r.length)]}computeOutputShape(t){assert(Array.isArray(t)&&2===t.length&&Array.isArray(t[0])&&Array.isArray(t[1]),()=>"A `Dot` layer should be called on a list of exactly 2 inputs.");let r=t[0].slice(),a=t[1].slice();if(r.length>3||a.length>3)throw new errors_NotImplementedError("Dot layer does not support tensors of 4D or higher rank yet.");let n=this.interpretAxes(r,a);r.splice(n[0],1),a.splice(n[1],1),a.splice(0,1);let s=r.concat(a);return 1===s.length&&s.push(1),s}computeMask(t,r){return null}getConfig(){let t={axes:this.axes,normalize:this.normalize};return Object.assign(t,super.getConfig()),t}};merge_Dot.className="Dot",registerClass(merge_Dot);let noise_GaussianNoise=class noise_GaussianNoise extends Layer{constructor(t){super(t),this.supportsMasking=!0,this.stddev=t.stddev}computeOutputShape(t){return t}getConfig(){let t=super.getConfig(),r={stddev:this.stddev};return Object.assign(r,t),r}call(t,r){return globals_tidy(()=>{this.invokeCallHook(t,r);let a=getExactlyOneTensor(t);return inTrainPhase(()=>a$(tfjs_backend_randomNormal(a.shape,0,this.stddev),a),()=>a,r.training||!1)})}};noise_GaussianNoise.className="GaussianNoise",registerClass(noise_GaussianNoise);let noise_GaussianDropout=class noise_GaussianDropout extends Layer{constructor(t){super(t),this.supportsMasking=!0,this.rate=t.rate}computeOutputShape(t){return t}getConfig(){let t=super.getConfig(),r={rate:this.rate};return Object.assign(r,t),r}call(t,r){return 
globals_tidy(()=>{this.invokeCallHook(t,r);let a=getExactlyOneTensor(t);return this.rate>0&&this.rate<1?inTrainPhase(()=>{let t=Math.sqrt(this.rate/(1-this.rate));return aD(a,tfjs_backend_randomNormal(a.shape,1,t))},()=>a,r.training||!1):a})}};noise_GaussianDropout.className="GaussianDropout",registerClass(noise_GaussianDropout);let noise_AlphaDropout=class noise_AlphaDropout extends Layer{constructor(t){super(t),this.supportsMasking=!0,this.rate=t.rate,this.noiseShape=t.noiseShape}_getNoiseShape(t){return this.noiseShape||getExactlyOneTensor(t).shape}computeOutputShape(t){return t}getConfig(){let t=super.getConfig(),r={rate:this.rate};return Object.assign(r,t),r}call(t,r){return globals_tidy(()=>{if(this.rate<1&&this.rate>0){let a=this._getNoiseShape(t);return inTrainPhase(()=>{let r=getExactlyOneTensor(t),n=n4(sq(a),this.rate);n=aE(n,"float32");let s=((1-this.rate)*(1+3.09091329228798*this.rate))**-.5,i=-(-1.7580993408473766*s)*this.rate,o=a$(aD(r,n),aD(a$(n,-1),-1.7580993408473766));return a$(aD(o,s),i)},()=>getExactlyOneTensor(t),r.training||!1)}return t})}};function batchNormalization(t,r,a,n,s,i=.001){let o;if(2===t.rank)o=no(t,r,a,n,s,i);else if(3===t.rank)o=nl(t,r,a,n,s,i);else if(4===t.rank)o=nu(t,r,a,n,s,i);else throw new errors_NotImplementedError(`batchNormalization is not implemented for array of rank ${t.rank} yet`);return o}function regularNormalizeBatchInTraining(t,r,a,n,s=.001){return globals_tidy(()=>{let i=sS(t,n),o=i.mean,l=i.variance;return[batchNormalization(t,o,l,a,r,s),o,l]})}function broadcastNormalizeBatchInTraining(t,r,a,n,s=.001){return globals_tidy(()=>{let i=sS(t,n),o=i.mean,l=i.variance,u=[];for(let r of math_utils_range(0,t.rank))-1!==n.indexOf(r)?u.push(1):u.push(t.shape[r]);let p=a6(o,u),m=a6(l,u),y=null==r?null:a6(r,u);return[batchNormalization(t,p,m,null==a?null:a6(a,u),y,s),o,l]})}function normalizeBatchInTraining(t,r,a,n,s=.001){return 
arraysEqual(n.slice().sort(),math_utils_range(0,t.rank-1))?regularNormalizeBatchInTraining(t,r,a,n,s):broadcastNormalizeBatchInTraining(t,r,a,n,s)}noise_AlphaDropout.className="AlphaDropout",registerClass(noise_AlphaDropout);let normalization_BatchNormalization=class normalization_BatchNormalization extends Layer{constructor(t){null==t&&(t={}),super(t),this.supportsMasking=!0,this.axis=null==t.axis?-1:t.axis,this.momentum=null==t.momentum?.99:t.momentum,this.epsilon=null==t.epsilon?.001:t.epsilon,this.center=null==t.center||t.center,this.scale=null==t.scale||t.scale,this.betaInitializer=getInitializer(t.betaInitializer||"zeros"),this.gammaInitializer=getInitializer(t.gammaInitializer||"ones"),this.movingMeanInitializer=getInitializer(t.movingMeanInitializer||"zeros"),this.movingVarianceInitializer=getInitializer(t.movingVarianceInitializer||"ones"),this.betaConstraint=getConstraint(t.betaConstraint),this.gammaConstraint=getConstraint(t.gammaConstraint),this.betaRegularizer=getRegularizer(t.betaRegularizer),this.gammaRegularizer=getRegularizer(t.gammaRegularizer)}build(t){t=getExactlyOneShape(t);let r=this.axis>=0?this.axis:this.axis+t.length,a=t[r];if(null==a)throw new errors_ValueError(`Axis ${r} of input tensor should have a defined dimension but the layer received an input with shape ${JSON.stringify(t)}.`);this.inputSpec=[new InputSpec({ndim:t.length,axes:{[r]:a}})];let n=[a];this.scale&&(this.gamma=this.addWeight("gamma",n,null,this.gammaInitializer,this.gammaRegularizer,!0,this.gammaConstraint)),this.center&&(this.beta=this.addWeight("beta",n,null,this.betaInitializer,this.betaRegularizer,!0,this.betaConstraint)),this.movingMean=this.addWeight("moving_mean",n,null,this.movingMeanInitializer,null,!1),this.movingVariance=this.addWeight("moving_variance",n,null,this.movingVarianceInitializer,null,!1),this.built=!0}call(t,r){return globals_tidy(()=>{let 
a=null!=r.training&&r.training,n=getExactlyOneTensor(t),s=n.shape,i=s.length,o=math_utils_range(0,i),l=this.axis>=0?this.axis:this.axis+i;o.splice(l,1);let u=pyListRepeat(1,i);u[l]=s[l];let p=o.slice();p.sort();let m=!arraysEqual(p,math_utils_range(0,i).slice(0,i-1)),normalizeInference=()=>m?batchNormalization(n,a6(this.movingMean.read(),u),a6(this.movingVariance.read(),u),this.center?a6(this.beta.read(),u):null,this.scale?a6(this.gamma.read(),u):null,this.epsilon):batchNormalization(n,this.movingMean.read(),this.movingVariance.read(),null==this.beta?null:this.beta.read(),null==this.gamma?null:this.gamma.read(),this.epsilon);if(!a)return normalizeInference();let[y,_,w]=normalizeBatchInTraining(n,this.gamma.read(),this.beta.read(),o,this.epsilon),doMovingAverage=(t,r,a)=>{globals_tidy(()=>{let n=t.read(),s=aD(aB(n,r),1-a);t.write(aB(n,s))})};return doMovingAverage(this.movingMean,_,this.momentum),doMovingAverage(this.movingVariance,w,this.momentum),y})}getConfig(){let t={axis:this.axis,momentum:this.momentum,epsilon:this.epsilon,center:this.center,scale:this.scale,betaInitializer:serializeKerasObject(this.betaInitializer),gammaInitializer:serializeKerasObject(this.gammaInitializer),movingMeanInitializer:serializeKerasObject(this.movingMeanInitializer),movingVarianceInitializer:serializeKerasObject(this.movingVarianceInitializer),betaRegularizer:serializeKerasObject(this.betaRegularizer),gammaRegularizer:serializeKerasObject(this.gammaRegularizer),betaConstraint:serializeKerasObject(this.betaConstraint),gammaConstraint:serializeKerasObject(this.gammaConstraint)};return Object.assign(t,super.getConfig()),t}};normalization_BatchNormalization.className="BatchNormalization",registerClass(normalization_BatchNormalization);let normalization_LayerNormalization=class normalization_LayerNormalization extends Layer{constructor(t){if(null==t&&(t={}),super(t),this.axis=null==t.axis?-1:t.axis,"number"==typeof this.axis){if(!Number.isInteger(this.axis))throw Error(`Expected axis 
to be an integer, but received ${this.axis}`)}else if(Array.isArray(this.axis)){for(const t of this.axis)if(!Number.isInteger(t))throw Error(`Expected axis to be an array of integers, but received ${JSON.stringify(this.axis)}`)}else throw Error(`Expected axis to be an integer or an array of integers, but received ${JSON.stringify(this.axis)}`);this.epsilon=null==t.epsilon?.001:t.epsilon,this.center=null==t.center||t.center,this.scale=null==t.scale||t.scale,this.betaInitializer=getInitializer(t.betaInitializer||"zeros"),this.gammaInitializer=getInitializer(t.gammaInitializer||"ones"),this.betaRegularizer=getRegularizer(t.betaRegularizer),this.gammaRegularizer=getRegularizer(t.gammaRegularizer),this.supportsMasking=!0}build(t){let r=(t=getExactlyOneShape(t)).length;"number"==typeof this.axis&&(this.axis=[this.axis]);for(let t=0;t=r)throw Error(`Invalid axis: ${t}`);if(this.axis.length!==generic_utils_unique(this.axis).length)throw Error(`Found duplicate axes in: ${this.axis}`);let a=this.axis.map(r=>t[r]);this.scale?this.gamma=this.addWeight("gamma",a,"float32",this.gammaInitializer,this.gammaRegularizer,!0):this.gamma=null,this.center?this.beta=this.addWeight("beta",a,"float32",this.betaInitializer,this.betaRegularizer,!0):this.beta=null,this.built=!0}call(t,r){let a=getExactlyOneTensor(t),n=a.shape,s=n.length;return globals_tidy(()=>{let{mean:t,variance:r}=sS(a,this.axis,!0),i=pyListRepeat(1,s);for(let t of this.axis)i[t]=n[t];let broadcast=t=>null!=t&&t.shape.length!==s?a6(t,i):t,o=this.scale?broadcast(this.gamma.read()):null,l=this.center?broadcast(this.beta.read()):null,u=[],p=[];for(let t=0;t{if(4!==t.rank)throw new errors_ValueError(`temporalPadding expects input tensor to be 4-D, but received a ${t.rank}-D tensor.`);if(null==r&&(r=[[1,1],[1,1]]),2!==r.length||2!==r[0].length||2!==r[1].length)throw new errors_ValueError("spatial2dPadding expects `padding` to be an Array of two Arrays, each of which is an Array of two 
integers.");if(null==a&&(a=imageDataFormat()),"channelsLast"!==a&&"channelsFirst"!==a)throw new errors_ValueError(`Unknown data format: ${a}. Supported data formats are 'channelsLast' and 'channelsFirst.`);return s$(t,"channelsFirst"===a?[[0,0],[0,0],r[0],r[1]]:[[0,0],r[0],r[1],[0,0]])})}normalization_LayerNormalization.className="LayerNormalization",registerClass(normalization_LayerNormalization);let padding_ZeroPadding2D=class padding_ZeroPadding2D extends Layer{constructor(t){if(null==t&&(t={}),super(t),this.dataFormat=null==t.dataFormat?imageDataFormat():t.dataFormat,null==t.padding)this.padding=[[1,1],[1,1]];else if("number"==typeof t.padding)this.padding=[[t.padding,t.padding],[t.padding,t.padding]];else{let r,a;if(t.padding=t.padding,2!==t.padding.length)throw new errors_ValueError(`ZeroPadding2D expects padding to be a length-2 array, but received a length-${t.padding.length} array.`);if("number"==typeof t.padding[0])r=[t.padding[0],t.padding[0]],a=[t.padding[1],t.padding[1]];else{if(t.padding=t.padding,2!==t.padding[0].length)throw new errors_ValueError(`ZeroPadding2D expects height padding to be a length-2 array, but received a length-${t.padding[0].length} array.`);if(r=t.padding[0],2!==t.padding[1].length)throw new errors_ValueError(`ZeroPadding2D expects width padding to be a length-2 array, but received a length-${t.padding[1].length} array.`);a=t.padding[1]}this.padding=[r,a]}this.inputSpec=[new InputSpec({ndim:4})]}computeOutputShape(t){let r,a;return(t=getExactlyOneShape(t),"channelsFirst"===this.dataFormat)?(r=null!=t[2]&&t[2]>=0?t[2]+this.padding[0][0]+this.padding[0][1]:null,a=null!=t[3]&&t[3]>=0?t[3]+this.padding[1][0]+this.padding[1][1]:null,[t[0],t[1],r,a]):(r=null!=t[1]&&t[1]>=0?t[1]+this.padding[0][0]+this.padding[0][1]:null,a=null!=t[2]&&t[2]>=0?t[2]+this.padding[1][0]+this.padding[1][1]:null,[t[0],r,a,t[3]])}call(t,r){return globals_tidy(()=>spatial2dPadding(getExactlyOneTensor(t),this.padding,this.dataFormat))}getConfig(){let 
t={padding:this.padding,dataFormat:this.dataFormat};return Object.assign(t,super.getConfig()),t}};function pool2d(t,r,a,n,s,i){return globals_tidy(()=>{let o;common_checkDataFormat(s),checkPoolMode(i),checkPaddingMode(n),null==a&&(a=[1,1]),null==n&&(n="valid"),null==s&&(s=imageDataFormat()),null==i&&(i="max"),t=preprocessConv2DInput(t,s);let l="same"===n?"same":"valid";return o="max"===i?sg(t,r,a,l):a5(t,r,a,l),"channelsFirst"===s&&(o=iI(o,[0,3,1,2])),o})}function pool3d(t,r,a,n,s,i){return globals_tidy(()=>{let o;common_checkDataFormat(s),checkPoolMode(i),checkPaddingMode(n),null==a&&(a=[1,1,1]),null==n&&(n="valid"),null==s&&(s=imageDataFormat()),null==i&&(i="max"),t=preprocessConv3DInput(t,s);let l="same"===n?"same":"valid";return o="max"===i?sy(t,r,a,l):a8(t,r,a,l),"channelsFirst"===s&&(o=iI(o,[0,4,1,2,3])),o})}padding_ZeroPadding2D.className="ZeroPadding2D",registerClass(padding_ZeroPadding2D);let Pooling1D=class Pooling1D extends Layer{constructor(t){if(null==t.poolSize&&(t.poolSize=2),super(t),"number"==typeof t.poolSize)this.poolSize=[t.poolSize];else if(Array.isArray(t.poolSize)&&1===t.poolSize.length&&"number"==typeof t.poolSize[0])this.poolSize=t.poolSize;else throw new errors_ValueError(`poolSize for 1D convolutional layer must be a number or an Array of a single number, but received ${JSON.stringify(t.poolSize)}`);if(assertPositiveInteger(this.poolSize,"poolSize"),null==t.strides)this.strides=this.poolSize;else if("number"==typeof t.strides)this.strides=[t.strides];else if(Array.isArray(t.strides)&&1===t.strides.length&&"number"==typeof t.strides[0])this.strides=t.strides;else throw new errors_ValueError(`strides for 1D convolutional layer must be a number or an Array of a single number, but received ${JSON.stringify(t.strides)}`);assertPositiveInteger(this.strides,"strides"),this.padding=null==t.padding?"valid":t.padding,checkPaddingMode(this.padding),this.inputSpec=[new InputSpec({ndim:3})]}computeOutputShape(t){let 
r=convOutputLength((t=getExactlyOneShape(t))[1],this.poolSize[0],this.padding,this.strides[0]);return[t[0],r,t[2]]}call(t,r){return globals_tidy(()=>(this.invokeCallHook(t,r),t=tfjs_backend_expandDims(getExactlyOneTensor(t),2),im(this.poolingFunction(getExactlyOneTensor(t),[this.poolSize[0],1],[this.strides[0],1],this.padding,"channelsLast"),[2])))}getConfig(){let t={poolSize:this.poolSize,padding:this.padding,strides:this.strides};return Object.assign(t,super.getConfig()),t}};let pooling_MaxPooling1D=class pooling_MaxPooling1D extends Pooling1D{constructor(t){super(t)}poolingFunction(t,r,a,n,s){return common_checkDataFormat(s),checkPaddingMode(n),pool2d(t,r,a,n,s,"max")}};pooling_MaxPooling1D.className="MaxPooling1D",registerClass(pooling_MaxPooling1D);let pooling_AveragePooling1D=class pooling_AveragePooling1D extends Pooling1D{constructor(t){super(t)}poolingFunction(t,r,a,n,s){return common_checkDataFormat(s),checkPaddingMode(n),pool2d(t,r,a,n,s,"avg")}};pooling_AveragePooling1D.className="AveragePooling1D",registerClass(pooling_AveragePooling1D);let Pooling2D=class Pooling2D extends Layer{constructor(t){if(null==t.poolSize&&(t.poolSize=[2,2]),super(t),this.poolSize=Array.isArray(t.poolSize)?t.poolSize:[t.poolSize,t.poolSize],null==t.strides)this.strides=this.poolSize;else if(Array.isArray(t.strides)){if(2!==t.strides.length)throw new errors_ValueError(`If the strides property of a 2D pooling layer is an Array, it is expected to have a length of 2, but received length ${t.strides.length}.`);this.strides=t.strides}else this.strides=[t.strides,t.strides];assertPositiveInteger(this.poolSize,"poolSize"),assertPositiveInteger(this.strides,"strides"),this.padding=null==t.padding?"valid":t.padding,this.dataFormat=null==t.dataFormat?"channelsLast":t.dataFormat,common_checkDataFormat(this.dataFormat),checkPaddingMode(this.padding),this.inputSpec=[new InputSpec({ndim:4})]}computeOutputShape(t){t=getExactlyOneShape(t);let 
r="channelsFirst"===this.dataFormat?t[2]:t[1],a="channelsFirst"===this.dataFormat?t[3]:t[2];return(r=convOutputLength(r,this.poolSize[0],this.padding,this.strides[0]),a=convOutputLength(a,this.poolSize[1],this.padding,this.strides[1]),"channelsFirst"===this.dataFormat)?[t[0],t[1],r,a]:[t[0],r,a,t[3]]}call(t,r){return globals_tidy(()=>(this.invokeCallHook(t,r),this.poolingFunction(getExactlyOneTensor(t),this.poolSize,this.strides,this.padding,this.dataFormat)))}getConfig(){let t={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat};return Object.assign(t,super.getConfig()),t}};let pooling_MaxPooling2D=class pooling_MaxPooling2D extends Pooling2D{constructor(t){super(t)}poolingFunction(t,r,a,n,s){return common_checkDataFormat(s),checkPaddingMode(n),pool2d(t,r,a,n,s,"max")}};pooling_MaxPooling2D.className="MaxPooling2D",registerClass(pooling_MaxPooling2D);let pooling_AveragePooling2D=class pooling_AveragePooling2D extends Pooling2D{constructor(t){super(t)}poolingFunction(t,r,a,n,s){return common_checkDataFormat(s),checkPaddingMode(n),pool2d(t,r,a,n,s,"avg")}};pooling_AveragePooling2D.className="AveragePooling2D",registerClass(pooling_AveragePooling2D);let Pooling3D=class Pooling3D extends Layer{constructor(t){if(null==t.poolSize&&(t.poolSize=[2,2,2]),super(t),this.poolSize=Array.isArray(t.poolSize)?t.poolSize:[t.poolSize,t.poolSize,t.poolSize],null==t.strides)this.strides=this.poolSize;else if(Array.isArray(t.strides)){if(3!==t.strides.length)throw new errors_ValueError(`If the strides property of a 3D pooling layer is an Array, it is expected to have a length of 3, but received length ${t.strides.length}.`);this.strides=t.strides}else 
this.strides=[t.strides,t.strides,t.strides];assertPositiveInteger(this.poolSize,"poolSize"),assertPositiveInteger(this.strides,"strides"),this.padding=null==t.padding?"valid":t.padding,this.dataFormat=null==t.dataFormat?"channelsLast":t.dataFormat,common_checkDataFormat(this.dataFormat),checkPaddingMode(this.padding),this.inputSpec=[new InputSpec({ndim:5})]}computeOutputShape(t){t=getExactlyOneShape(t);let r="channelsFirst"===this.dataFormat?t[2]:t[1],a="channelsFirst"===this.dataFormat?t[3]:t[2],n="channelsFirst"===this.dataFormat?t[4]:t[3];return(r=convOutputLength(r,this.poolSize[0],this.padding,this.strides[0]),a=convOutputLength(a,this.poolSize[1],this.padding,this.strides[1]),n=convOutputLength(n,this.poolSize[2],this.padding,this.strides[2]),"channelsFirst"===this.dataFormat)?[t[0],t[1],r,a,n]:[t[0],r,a,n,t[4]]}call(t,r){return globals_tidy(()=>(this.invokeCallHook(t,r),this.poolingFunction(getExactlyOneTensor(t),this.poolSize,this.strides,this.padding,this.dataFormat)))}getConfig(){let t={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat};return Object.assign(t,super.getConfig()),t}};let pooling_MaxPooling3D=class pooling_MaxPooling3D extends Pooling3D{constructor(t){super(t)}poolingFunction(t,r,a,n,s){return common_checkDataFormat(s),checkPaddingMode(n),pool3d(t,r,a,n,s,"max")}};pooling_MaxPooling3D.className="MaxPooling3D",registerClass(pooling_MaxPooling3D);let pooling_AveragePooling3D=class pooling_AveragePooling3D extends Pooling3D{constructor(t){super(t)}poolingFunction(t,r,a,n,s){return common_checkDataFormat(s),checkPaddingMode(n),pool3d(t,r,a,n,s,"avg")}};pooling_AveragePooling3D.className="AveragePooling3D",registerClass(pooling_AveragePooling3D);let GlobalPooling1D=class GlobalPooling1D extends Layer{constructor(t){super(t),this.inputSpec=[new InputSpec({ndim:3})]}computeOutputShape(t){return[t[0],t[2]]}call(t,r){throw new errors_NotImplementedError}};let pooling_GlobalAveragePooling1D=class 
pooling_GlobalAveragePooling1D extends GlobalPooling1D{constructor(t){super(t||{})}call(t,r){return globals_tidy(()=>sv(getExactlyOneTensor(t),1))}};pooling_GlobalAveragePooling1D.className="GlobalAveragePooling1D",registerClass(pooling_GlobalAveragePooling1D);let pooling_GlobalMaxPooling1D=class pooling_GlobalMaxPooling1D extends GlobalPooling1D{constructor(t){super(t||{})}call(t,r){return globals_tidy(()=>nj(getExactlyOneTensor(t),1))}};pooling_GlobalMaxPooling1D.className="GlobalMaxPooling1D",registerClass(pooling_GlobalMaxPooling1D);let GlobalPooling2D=class GlobalPooling2D extends Layer{constructor(t){super(t),this.dataFormat=null==t.dataFormat?"channelsLast":t.dataFormat,common_checkDataFormat(this.dataFormat),this.inputSpec=[new InputSpec({ndim:4})]}computeOutputShape(t){return"channelsLast"===this.dataFormat?[t[0],t[3]]:[t[0],t[1]]}call(t,r){throw new errors_NotImplementedError}getConfig(){let t={dataFormat:this.dataFormat};return Object.assign(t,super.getConfig()),t}};let pooling_GlobalAveragePooling2D=class pooling_GlobalAveragePooling2D extends GlobalPooling2D{call(t,r){return globals_tidy(()=>{let r=getExactlyOneTensor(t);return"channelsLast"===this.dataFormat?sv(r,[1,2]):sv(r,[2,3])})}};pooling_GlobalAveragePooling2D.className="GlobalAveragePooling2D",registerClass(pooling_GlobalAveragePooling2D);let pooling_GlobalMaxPooling2D=class pooling_GlobalMaxPooling2D extends GlobalPooling2D{call(t,r){return globals_tidy(()=>{let r=getExactlyOneTensor(t);return"channelsLast"===this.dataFormat?nj(r,[1,2]):nj(r,[2,3])})}};pooling_GlobalMaxPooling2D.className="GlobalMaxPooling2D",registerClass(pooling_GlobalMaxPooling2D);let Wrapper=class Wrapper extends Layer{constructor(t){super(t),this.layer=t.layer}build(t){this.built=!0}get trainable(){return null!=this.layer&&this.layer.trainable}set trainable(t){null!=this.layer&&(this.layer.trainable=t)}get trainableWeights(){return this.layer.trainableWeights}get nonTrainableWeights(){return 
this.layer.nonTrainableWeights}get updates(){return this.layer._updates}get losses(){return this.layer.losses}getWeights(){return this.layer.getWeights()}setWeights(t){this.layer.setWeights(t)}getConfig(){let t={layer:{className:this.layer.getClassName(),config:this.layer.getConfig()}};return Object.assign(t,super.getConfig()),t}setFastWeightInitDuringBuild(t){super.setFastWeightInitDuringBuild(t),null!=this.layer&&this.layer.setFastWeightInitDuringBuild(t)}static fromConfig(t,r,a={}){let n=serialization_deserialize(r.layer,a);delete r.layer;let s={layer:n};return Object.assign(s,r),new t(s)}};let wrappers_TimeDistributed=class wrappers_TimeDistributed extends Wrapper{constructor(t){super(t),this.supportsMasking=!0}build(t){if((t=getExactlyOneShape(t)).length<3)throw new errors_ValueError(`TimeDistributed layer expects an input shape >= 3D, but received input shape ${JSON.stringify(t)}`);this.inputSpec=[{shape:t}];let r=[t[0]].concat(t.slice(2));this.layer.built||(this.layer.build(r),this.layer.built=!0),super.build(t)}computeOutputShape(t){let r=[(t=getExactlyOneShape(t))[0]].concat(t.slice(2)),a=this.layer.computeOutputShape(r),n=t[1];return[a[0],n].concat(a.slice(1))}call(t,r){return globals_tidy(()=>rnn((t,a)=>[getExactlyOneTensor(this.layer.call(t,r)),[]],t=getExactlyOneTensor(t),[],!1,null,null,!1,!0)[1])}};function checkBidirectionalMergeMode(t){checkStringTypeUnionValue(oY,"BidirectionalMergeMode",t)}wrappers_TimeDistributed.className="TimeDistributed",registerClass(wrappers_TimeDistributed);let wrappers_Bidirectional=class wrappers_Bidirectional extends Wrapper{constructor(t){super(t);const r=t.layer.getConfig(),a={};a.className=t.layer.getClassName(),a.config=r,this.forwardLayer=serialization_deserialize(a),r.goBackwards=!0!==r.goBackwards;const 
n={};if(n.className=t.layer.getClassName(),n.config=r,this.backwardLayer=serialization_deserialize(n),this.forwardLayer.name="forward_"+this.forwardLayer.name,this.backwardLayer.name="backward_"+this.backwardLayer.name,this.mergeMode=void 0===t.mergeMode?"concat":t.mergeMode,checkBidirectionalMergeMode(this.mergeMode),t.weights)throw new errors_NotImplementedError("weights support is not implemented for Bidirectional layer yet.");this._stateful=t.layer.stateful,this.returnSequences=t.layer.returnSequences,this.returnState=t.layer.returnState,this.supportsMasking=!0,this._trainable=!0,this.inputSpec=t.layer.inputSpec,this.numConstants=null}get trainable(){return this._trainable}set trainable(t){this._trainable=t,null!=this.forwardLayer&&(this.forwardLayer.trainable=t),null!=this.backwardLayer&&(this.backwardLayer.trainable=t)}getWeights(){return this.forwardLayer.getWeights().concat(this.backwardLayer.getWeights())}setWeights(t){let r=Math.floor(t.length/2);this.forwardLayer.setWeights(t.slice(0,r)),this.backwardLayer.setWeights(t.slice(r))}computeOutputShape(t){let r,a,n,s=this.forwardLayer.computeOutputShape(t);return(Array.isArray(s)&&Array.isArray(s[0])||(s=[s]),this.returnState&&(n=s.slice(1)),r=s[0],"concat"===this.mergeMode?(r[r.length-1]*=2,a=[r]):a=null==this.mergeMode?[r,r.slice()]:[r],this.returnState)?null==this.mergeMode?a.concat(n).concat(n.slice()):[r].concat(n).concat(n.slice()):singletonOrArray(a)}apply(t,r){let a=null==r?null:r.initialState,n=null==r?null:r.constants;null==r&&(r={});let s=standardizeArgs(t,a,n,this.numConstants);if(t=s.inputs,a=s.initialState,n=s.constants,Array.isArray(t)&&(a=t.slice(1),t=t[0]),(null==a||0===a.length)&&null==n)return super.apply(t,r);let i=[],o=[];if(null!=a){let t=a.length;if(t%2>0)throw new errors_ValueError("When passing `initialState` to a Bidrectional RNN, the state should be an Array containing the states of the underlying RNNs.");r.initialState=a,i.push(...a);let n=a.map(t=>new 
InputSpec({shape:t.shape}));this.forwardLayer.stateSpec=n.slice(0,t/2),this.backwardLayer.stateSpec=n.slice(t/2),o.push(...n)}if(null!=n)throw new errors_NotImplementedError("Support for constants in Bidirectional layers is not implemented yet.");let l=i[0]instanceof SymbolicTensor;for(let t of i)if(t instanceof SymbolicTensor!==l)throw new errors_ValueError("The initial state of a Bidirectional layer cannot be specified as a mix of symbolic and non-symbolic tensors");if(!l)return super.apply(t,r);{let a=[t].concat(i),n=this.inputSpec.concat(o),s=this.inputSpec;this.inputSpec=n;let l=super.apply(a,r);return this.inputSpec=s,l}}call(t,r){return globals_tidy(()=>{let a,n,s,i,o=r.initialState;if(null==o)a=this.forwardLayer.call(t,r),n=this.backwardLayer.call(t,r);else{let s=o.slice(0,o.length/2),i=o.slice(o.length/2);a=this.forwardLayer.call(t,Object.assign(r,{initialState:s})),n=this.backwardLayer.call(t,Object.assign(r,{initialState:i}))}return(this.returnState&&(Array.isArray(a)&&(s=a.slice(1).concat(n.slice(1))),a=a[0],n=n[0]),this.returnSequences&&(n=s0(n,1)),"concat"===this.mergeMode?i=concatenate([a,n]):"sum"===this.mergeMode?i=a$(a,n):"ave"===this.mergeMode?i=aD(.5,a$(a,n)):"mul"===this.mergeMode?i=aD(a,n):null==this.mergeMode&&(i=[a,n]),this.returnState)?null==this.mergeMode?i.concat(s):[i].concat(s):i})}resetStates(t){this.forwardLayer.resetStates(),this.backwardLayer.resetStates()}build(t){nameScope(this.forwardLayer.name,()=>{this.forwardLayer.build(t)}),nameScope(this.backwardLayer.name,()=>{this.backwardLayer.build(t)}),this.built=!0}computeMask(t,r){let a;if(Array.isArray(r)&&(r=r[0]),a=this.returnSequences?null==this.mergeMode?[r,r]:r:null==this.mergeMode?[null,null]:null,!this.returnState)return a;{let t=this.forwardLayer.states.map(t=>null);return Array.isArray(a)?a.concat(t).concat(t):[a].concat(t).concat(t)}}get trainableWeights(){return this.forwardLayer.trainableWeights.concat(this.backwardLayer.trainableWeights)}get nonTrainableWeights(){return 
this.forwardLayer.nonTrainableWeights.concat(this.backwardLayer.nonTrainableWeights)}setFastWeightInitDuringBuild(t){super.setFastWeightInitDuringBuild(t),null!=this.forwardLayer&&this.forwardLayer.setFastWeightInitDuringBuild(t),null!=this.backwardLayer&&this.backwardLayer.setFastWeightInitDuringBuild(t)}getConfig(){let t={mergeMode:this.mergeMode};return Object.assign(t,super.getConfig()),t}static fromConfig(t,r){let a=serialization_deserialize(r.layer);if(delete r.layer,null!=r.numConstants)throw new errors_NotImplementedError("Deserialization of a Bidirectional layer with numConstants present is not supported yet.");return r.layer=a,new t(r)}};wrappers_Bidirectional.className="Bidirectional",registerClass(wrappers_Bidirectional);let image_preprocessing_Rescaling=class image_preprocessing_Rescaling extends Layer{constructor(t){super(t),this.scale=t.scale,t.offset?this.offset=t.offset:this.offset=0}getConfig(){let t={scale:this.scale,offset:this.offset};return Object.assign(t,super.getConfig()),t}call(t,r){return globals_tidy(()=>("float32"!==(t=getExactlyOneTensor(t)).dtype&&(t=aE(t,"float32")),a$(aD(t,this.scale),this.offset)))}};image_preprocessing_Rescaling.className="Rescaling",registerClass(image_preprocessing_Rescaling);let{resizeBilinear:ls,cropAndResize:li}=om;let center_crop_CenterCrop=class center_crop_CenterCrop extends Layer{constructor(t){super(t),this.height=t.height,this.width=t.width}centerCrop(t,r,a,n,s,i,o,l){return globals_tidy(()=>{let u,p=!1,m=[r/i,a/o,(n+r)/i,(s+a)/o],y=[];3===t.rank?(p=!0,u=ig([t])):u=t;for(let t=0;taE(ls(t,[r,a]),n))}call(t,r){return globals_tidy(()=>{let r=getExactlyOneTensor(t),a=r.dtype,n=r.shape,s=n[n.length-3],i=n[n.length-2],o=0;s!==this.height&&(o=Math.floor((s-this.height)/2));let l=0;return(i!==this.width&&0===(l=Math.floor((i-this.width)/2))&&(l=1),o>=0&&l>=0)?this.centerCrop(r,o,l,this.height,this.width,s,i,a):this.upsize(t,this.height,this.width,a)})}getConfig(){let 
t={height:this.height,width:this.width};return Object.assign(t,super.getConfig()),t}computeOutputShape(t){let r=(t=getExactlyOneShape(t)).length-3,a=t.length-2;return t[r]=this.height,t[a]=this.width,t}};function encodeCategoricalInputs(t,r,a,n){let s,i=getExactlyOneTensor(t);if("int32"!==i.dtype&&(i=aE(i,"int32")),"int"===r)return i;let o=i.shape;if(0===i.rank&&(i=nZ(i,-1)),"oneHot"===r&&1!==i.shape[i.shape.length-1]&&(i=nZ(i,-1)),i.rank>2)throw new errors_ValueError(`When outputMode is not int, maximum output rank is 2 Received outputMode ${r} and input shape ${o} which would result in output rank ${i.rank}.`);let l=["multiHot","oneHot"].includes(r),u=i;if(s=void 0!==n&&"count"===r?nR(u,n,a,l):nR(u,[],a,l),"tfIdf"!==r)return s;if(n)return aD(s,n);throw new errors_ValueError("When outputMode is 'tfIdf', weights must be provided.")}center_crop_CenterCrop.className="CenterCrop",registerClass(center_crop_CenterCrop);let category_encoding_CategoryEncoding=class category_encoding_CategoryEncoding extends Layer{constructor(t){super(t),this.numTokens=t.numTokens,t.outputMode?this.outputMode=t.outputMode:this.outputMode="multiHot"}getConfig(){let t={numTokens:this.numTokens,outputMode:this.outputMode};return Object.assign(t,super.getConfig()),t}computeOutputShape(t){return null==(t=getExactlyOneShape(t))?[this.numTokens]:("oneHot"===this.outputMode&&1!==t[t.length-1]?t.push(this.numTokens):t[t.length-1]=this.numTokens,t)}call(t,r){return globals_tidy(()=>{let a;if("int32"!==(t=getExactlyOneTensor(t)).dtype&&(t=aE(t,"int32")),void 0!==r.countWeights){if("count"!==this.outputMode)throw new errors_ValueError(`countWeights is not used when outputMode !== count. 
Received countWeights=${r.countWeights}`);a=getExactlyOneTensor(r.countWeights)}let n=nj(t),s=nK(t),i=n3(this.numTokens,n).bufferSync().get(0),o=n4(s,0).bufferSync().get(0);if(!(i&&o))throw new errors_ValueError(`Input values must be between 0 < values <= numTokens with numTokens=${this.numTokens}`);return encodeCategoricalInputs(t,this.outputMode,this.numTokens,a)})}};category_encoding_CategoryEncoding.className="CategoryEncoding",registerClass(category_encoding_CategoryEncoding);let lo=new Set(["bilinear","nearest"]);let image_resizing_Resizing=class image_resizing_Resizing extends Layer{constructor(t){if(super(t),this.height=t.height,this.width=t.width,t.interpolation)if(lo.has(t.interpolation))this.interpolation=t.interpolation;else throw new errors_ValueError(`Invalid interpolation parameter: ${t.interpolation} is not implemented`);else this.interpolation="bilinear";this.cropToAspectRatio=!!t.cropToAspectRatio}computeOutputShape(t){let r=(t=getExactlyOneShape(t))[2];return[this.height,this.width,r]}getConfig(){let t={height:this.height,width:this.width,interpolation:this.interpolation,cropToAspectRatio:this.cropToAspectRatio};return Object.assign(t,super.getConfig()),t}call(t,r){return globals_tidy(()=>{let r=[this.height,this.width];if("bilinear"===this.interpolation)return om.resizeBilinear(t,r,!this.cropToAspectRatio);if("nearest"===this.interpolation)return om.resizeNearestNeighbor(t,r,!this.cropToAspectRatio);throw Error(`Interpolation is ${this.interpolation} but only ${[...lo]} are supported`)})}};image_resizing_Resizing.className="Resizing",registerClass(image_resizing_Resizing);let RandomSeed=class RandomSeed{constructor(t){this.seed=t}next(){if(void 0!==this.seed)return this.seed++}};RandomSeed.className="RandomSeed";let BaseRandomLayer=class BaseRandomLayer extends Layer{constructor(t){super(t),this.randomGenerator=new RandomSeed(t.seed)}getConfig(){let t={seed:this.randomGenerator.seed};return 
Object.assign(t,super.getConfig()),t}};BaseRandomLayer.className="BaseRandomLayer";let ll=new Set(["bilinear","nearest"]);let random_width_RandomWidth=class random_width_RandomWidth extends BaseRandomLayer{constructor(t){super(t);const{factor:r,interpolation:a="bilinear"}=t;if(this.factor=r,Array.isArray(this.factor)&&2===this.factor.length)this.widthLower=this.factor[0],this.widthUpper=this.factor[1];else if(!Array.isArray(this.factor)&&this.factor>0)this.widthLower=-this.factor,this.widthUpper=this.factor;else throw new errors_ValueError(`Invalid factor: ${this.factor}. Must be positive number or tuple of 2 numbers`);if(this.widthLower<-1||this.widthUpper<-1)throw new errors_ValueError(`factor must have values larger than -1. Got: ${this.factor}`);if(this.widthUpper{let r=getExactlyOneTensor(t);this.imgHeight=r.shape[r.shape.length-3];let a=r.shape[r.shape.length-2];this.widthFactor=sq([1],1+this.widthLower,1+this.widthUpper,"float32",this.randomGenerator.next());let n=this.widthFactor.dataSync()[0]*a;n=Math.round(n);let s=[this.imgHeight,n];switch(this.interpolation){case"bilinear":return om.resizeBilinear(t,s);case"nearest":return om.resizeNearestNeighbor(t,s);default:throw Error(`Interpolation is ${this.interpolation} but only ${[...ll]} are supported`)}})}};random_width_RandomWidth.className="RandomWidth",registerClass(random_width_RandomWidth);eV.registerFlag("KEEP_INTERMEDIATE_TENSORS",()=>!1,t=>{t&&console.warn("Keep intermediate tensors is ON. This will print the values of all intermediate tensors during model inference. Not all models support this mode. For details, check e2e/benchmarks/ model_config.js. 
This significantly impacts performance.")}),
/* TensorFlow DataType proto enum (reverse-mapped: F[code] = name). Codes
   1..23 are value dtypes; the *_REF variants are the same dtype + 100. */
(F=en||(en={}))[F.DT_INVALID=0]="DT_INVALID",F[F.DT_FLOAT=1]="DT_FLOAT",F[F.DT_DOUBLE=2]="DT_DOUBLE",F[F.DT_INT32=3]="DT_INT32",F[F.DT_UINT8=4]="DT_UINT8",F[F.DT_INT16=5]="DT_INT16",F[F.DT_INT8=6]="DT_INT8",F[F.DT_STRING=7]="DT_STRING",F[F.DT_COMPLEX64=8]="DT_COMPLEX64",F[F.DT_INT64=9]="DT_INT64",F[F.DT_BOOL=10]="DT_BOOL",F[F.DT_QINT8=11]="DT_QINT8",F[F.DT_QUINT8=12]="DT_QUINT8",F[F.DT_QINT32=13]="DT_QINT32",F[F.DT_BFLOAT16=14]="DT_BFLOAT16",F[F.DT_QINT16=15]="DT_QINT16",F[F.DT_QUINT16=16]="DT_QUINT16",F[F.DT_UINT16=17]="DT_UINT16",F[F.DT_COMPLEX128=18]="DT_COMPLEX128",F[F.DT_HALF=19]="DT_HALF",F[F.DT_RESOURCE=20]="DT_RESOURCE",F[F.DT_VARIANT=21]="DT_VARIANT",F[F.DT_UINT32=22]="DT_UINT32",F[F.DT_UINT64=23]="DT_UINT64",F[F.DT_FLOAT_REF=101]="DT_FLOAT_REF",F[F.DT_DOUBLE_REF=102]="DT_DOUBLE_REF",F[F.DT_INT32_REF=103]="DT_INT32_REF",F[F.DT_UINT8_REF=104]="DT_UINT8_REF",F[F.DT_INT16_REF=105]="DT_INT16_REF",F[F.DT_INT8_REF=106]="DT_INT8_REF",F[F.DT_STRING_REF=107]="DT_STRING_REF",F[F.DT_COMPLEX64_REF=108]="DT_COMPLEX64_REF",F[F.DT_INT64_REF=109]="DT_INT64_REF",F[F.DT_BOOL_REF=110]="DT_BOOL_REF",F[F.DT_QINT8_REF=111]="DT_QINT8_REF",F[F.DT_QUINT8_REF=112]="DT_QUINT8_REF",F[F.DT_QINT32_REF=113]="DT_QINT32_REF",F[F.DT_BFLOAT16_REF=114]="DT_BFLOAT16_REF",F[F.DT_QINT16_REF=115]="DT_QINT16_REF",F[F.DT_QUINT16_REF=116]="DT_QUINT16_REF",F[F.DT_UINT16_REF=117]="DT_UINT16_REF",F[F.DT_COMPLEX128_REF=118]="DT_COMPLEX128_REF",F[F.DT_HALF_REF=119]="DT_HALF_REF",F[F.DT_RESOURCE_REF=120]="DT_RESOURCE_REF",F[F.DT_VARIANT_REF=121]="DT_VARIANT_REF",F[F.DT_UINT32_REF=122]="DT_UINT32_REF",F[F.DT_UINT64_REF=123]="DT_UINT64_REF",
/* Checkpoint format version enum: LEGACY=0, V1=1, V2=2. */
(P=(D=es||(es={})).CheckpointFormatVersion||(D.CheckpointFormatVersion={}))[P.LEGACY=0]="LEGACY",P[P.V1=1]="V1",P[P.V2=2]="V2";
let lu={};
/* Resolves an op parameter for graph-model execution. If the param is
   declared in node.inputParams it is materialized from the node's input
   tensors (type "tensor" -> single tensor; "tensors" -> a slice of inputs
   with NoOp inputs filtered out; otherwise the tensor's data is read
   synchronously and returned as a number or nested array). Params not in
   inputParams fall back to the static value in node.attrParams.
   Note the declared inputIndexEnd===0 means "to the end of the inputs". */
function getParamValue(t,r,a,n,s){let i=r.inputParams[t];if(i&&void 0!==i.inputIndexStart){let t=i.inputIndexStart,o=0===i.inputIndexEnd?void 0:void 
0===i.inputIndexEnd?t+1:i.inputIndexEnd,l=t<0?r.inputNames.length+t:t;if("tensor"===i.type)return getTensor(r.inputNames[l],a,n,s);if("tensors"===i.type){let i=r.inputs.slice(t,o);return r.inputNames.slice(t,o).filter((t,r)=>{var a;return(null==(a=i[r])?void 0:a.op)!=="NoOp"}).map(t=>getTensor(t,a,n,s))}let u=getTensor(r.inputNames[l],a,n,s),p=u.dataSync();return"number"===i.type?p[0]:toNestedArray(u.shape,p)}let o=r.attrParams[t];return o&&o.value}
/* Looks up a tensor by node name: first consults the resource manager for a
   hash-table handle of that name, then scans the execution context's
   currentContextIds for the first context in which the (name-contextId) key
   exists in the tensors map, returning that node's output at the parsed
   index. Returns undefined when the name resolves in no active context. */
function getTensor(t,r,a,n){let[s,i]=parseNodeName(t,a);if(null!=n){let t=n.getHashTableHandleByName(s);if(null!=t)return t}let o=a.currentContextIds.find(t=>!!r[getNodeNameWithContextId(s,t)]);return void 0!==o?r[getNodeNameWithContextId(s,o)][i]:void 0}
/* Returns the output tensors stored under the node's name qualified with the
   context's currentContextId only (no fallback search across contexts). */
function getTensorsForCurrentContext(t,r,a){return r[getNodeNameWithContextId(t,a.currentContextId)]}
/* Parses a node name and returns [contextualized name, output index,
   optional output name]. */
function getNodeNameAndIndex(t,r){let[a,n,s]=parseNodeName(t,r);return[getNodeNameWithContextId(a,r&&r.currentContextId),n,s]}
/* Qualifies a node name with a context id as "name-contextId"; a falsy
   context id leaves the name unchanged. */
function getNodeNameWithContextId(t,r){return r?`${t}-${r}`:t}
/* Splits a node reference of the form "name", "name:index", or
   "name:outputName:index" into [name, index, outputName]. The empty string
   maps to ["", 0, undefined]. Results are memoized in
   context.parseNodeNameCache when the context provides one. */
function parseNodeName(t,r){let a;if(""===t)return["",0,void 0];let n=null!=r&&null!=r.parseNodeNameCache;if(n){let a=r.parseNodeNameCache.get(t);if(null!=a)return a}let s=t.split(":");if(1===s.length)a=[t,0,void 0];else{let t=s[0],r=3===s.length?s[1]:void 0;a=[t,Number(s[s.length-1]),r]}return n&&r.parseNodeNameCache.set(t,a),a}
/* Resolves the "pad" parameter; when it is "explicit", converts the flat
   8-element explicitPaddings attr into four [before, after] pairs (one per
   NHWC dimension). Otherwise returns the pad value as-is. */
function getPadding(t,r,a){let n=getParamValue("pad",t,r,a);if("explicit"===n){n=getParamValue("explicitPaddings",t,r,a);let s=[[0,0],[0,0],[0,0],[0,0]];for(let t=0;t<4;t++)s[t][0]=n[2*t],s[t][1]=n[2*t+1];return s}return n}
/* Returns the tensor itself when already marked kept, otherwise passes it
   through aA (presumably a clone op — confirm against the bundle). */
function cloneTensor(t){return t.kept?t:aA(t)}let 
lp=[{tfOpName:"Add",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"AddV2",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"AddN",category:"arithmetic",inputs:[{start:0,end:0,name:"tensors",type:"tensors"}]},{tfOpName:"BiasAdd",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0}]},{tfOpName:"Sub",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"RealDiv",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Div",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"DivNoNan",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"FloorDiv",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Mul",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Maximum",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Minimum",category:"arithm
etic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Pow",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"SquaredDifference",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Mod",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"FloorMod",category:"arithmetic",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]}],lh=[{tfOpName:"Abs",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Acos",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Asin",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Atan",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Atan2",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"y",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Ceil",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"ClipByValue",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"clipValueMin",type:"number"},{start:2,name:"clipValueMax",type:"number"}],attrs:[{tfName:"T",name:"dtype",type:"dtype"
,notSupported:!0}]},{tfOpName:"Complex",category:"basic_math",inputs:[{start:0,name:"real",type:"tensor"},{start:1,name:"imag",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"ComplexAbs",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Cos",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Cosh",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Elu",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Exp",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Floor",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Log",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Imag",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"Tout",name:"outputType",type:"dtype",notSupported:!0}]},{tfOpName:"Neg",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Real",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"Tout",name:"outputType",type:"dtype",notSupported:!0}]},{tfOpName:"Prelu",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"alpha",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Relu",category:"basic
_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Relu6",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Selu",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Sigmoid",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Sin",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Sinh",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Sqrt",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Rsqrt",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Square",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Tan",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Tanh",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Sign",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Round",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Expm1",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!
0}]},{tfOpName:"Log1p",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Reciprocal",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Softplus",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Asinh",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Acosh",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Atanh",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Erf",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"LeakyRelu",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"alpha",name:"alpha",type:"number",defaultValue:.2},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"IsNan",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"IsFinite",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"IsInf",category:"basic_math",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]}],lc=[{tfOpName:"EmptyTensorList",category:"control",inputs:[{start:0,name:"elementShape",type:"shape"},{start:1,name:"maxNumElements",type:"number"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"LoopCond",category:"control",inputs:[{start:0,name:"pred",type:"tensor"}]},{tfOpName:"Sw
itch",category:"control",inputs:[{start:0,name:"data",type:"tensor"},{start:1,name:"pred",type:"tensor"}]},{tfOpName:"Merge",category:"control",inputs:[{start:0,end:0,name:"tensors",type:"tensors"}]},{tfOpName:"Enter",category:"control",inputs:[{start:0,name:"tensor",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"frame_name",name:"frameName",type:"string"},{tfName:"is_constant",name:"isConstant",type:"bool"}]},{tfOpName:"Exit",category:"control",inputs:[{start:0,name:"tensor",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"NextIteration",category:"control",inputs:[{start:0,name:"tensor",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"TensorArrayV3",category:"control",inputs:[{start:0,name:"size",type:"number"}],attrs:[{tfName:"dtype",name:"dtype",type:"dtype"},{tfName:"element_shape",name:"elementShape",type:"shape"},{tfName:"dynamic_size",name:"dynamicSize",type:"bool"},{tfName:"clear_after_read",name:"clearAfterRead",type:"bool"},{tfName:"identical_element_shapes",name:"identicalElementShapes",type:"bool"},{tfName:"tensor_array_name",name:"name",type:"string"}]},{tfOpName:"TensorArrayWriteV3",category:"control",inputs:[{start:0,name:"tensorArrayId",type:"tensor"},{start:1,name:"index",type:"number"},{start:2,name:"tensor",type:"tensor"},{start:3,name:"flowIn",type:"number"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"TensorArrayReadV3",category:"control",inputs:[{start:0,name:"tensorArrayId",type:"tensor"},{start:1,name:"index",type:"number"},{start:2,name:"flowIn",type:"number"}],attrs:[{tfName:"dtype",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"TensorArrayGatherV3",category:"control",inputs:[{start:0,name:"tensorArrayId",type:"tensor"},{start:1,name:"indices",type:"number[]"},{start:2,name:"flowIn",type:"number"}],attrs:[{tfName:"dtype",name:"dtype",type:"dtype"},{tfName:"element_shape",name:"el
ementShape",type:"shape"}]},{tfOpName:"TensorArrayScatterV3",category:"control",inputs:[{start:0,name:"tensorArrayId",type:"tensor"},{start:1,name:"indices",type:"number[]"},{start:2,name:"tensor",type:"tensor"},{start:3,name:"flowIn",type:"number"}],attrs:[{tfName:"T",name:"dtype",type:"dtype"}]},{tfOpName:"TensorArrayConcatV3",category:"control",inputs:[{start:0,name:"tensorArrayId",type:"tensor"},{start:1,name:"flowIn",type:"number"}],attrs:[{tfName:"dtype",name:"dtype",type:"dtype"},{tfName:"element_shape_except0",name:"elementShapeExcept0",type:"shape",notSupported:!0}]},{tfOpName:"TensorArraySplitV3",category:"control",inputs:[{start:0,name:"tensorArrayId",type:"tensor"},{start:1,name:"tensor",type:"tensor"},{start:2,name:"lengths",type:"number[]"},{start:3,name:"flowIn",type:"number"}],attrs:[{tfName:"T",name:"dtype",type:"dtype"}]},{tfOpName:"TensorArraySizeV3",category:"control",inputs:[{start:0,name:"tensorArrayId",type:"tensor"},{start:1,name:"flowIn",type:"number"}]},{tfOpName:"TensorArrayCloseV3",category:"control",inputs:[{start:0,name:"tensorArrayId",type:"tensor"}]},{tfOpName:"StatelessIf",category:"control",inputs:[{start:0,name:"cond",type:"tensor"},{start:1,end:0,name:"args",type:"tensors"}],attrs:[{tfName:"then_branch",name:"thenBranch",type:"func"},{tfName:"else_branch",name:"elseBranch",type:"func"}]},{tfOpName:"If",category:"control",inputs:[{start:0,name:"cond",type:"tensor"},{start:1,end:0,name:"args",type:"tensors"}],attrs:[{tfName:"then_branch",name:"thenBranch",type:"func"},{tfName:"else_branch",name:"elseBranch",type:"func"}]},{tfOpName:"StatelessWhile",category:"control",inputs:[{start:0,end:0,name:"args",type:"tensors"}],attrs:[{tfName:"cond",name:"cond",type:"func"},{tfName:"body",name:"body",type:"func"}]},{tfOpName:"While",category:"control",inputs:[{start:0,end:0,name:"args",type:"tensors"}],attrs:[{tfName:"cond",name:"cond",type:"func"},{tfName:"body",name:"body",type:"func"}]},{tfOpName:"TensorListScatter",category:"control",inpu
ts:[{start:0,name:"tensor",type:"tensor"},{start:1,name:"indices",type:"number[]"},{start:2,name:"elementShape",type:"shape"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListScatterV2",category:"control",inputs:[{start:0,name:"tensor",type:"tensor"},{start:1,name:"indices",type:"number[]"},{start:2,name:"elementShape",type:"shape"},{start:3,name:"numElements",type:"number"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListGather",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"},{start:1,name:"indices",type:"number[]"},{start:2,name:"elementShape",type:"shape"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListGetItem",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"},{start:1,name:"index",type:"number"},{start:2,name:"elementShape",type:"shape"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListSetItem",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"},{start:1,name:"index",type:"number"},{start:2,name:"tensor",type:"tensor"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListReserve",category:"control",inputs:[{start:0,name:"elementShape",type:"shape"},{start:1,name:"numElements",type:"number"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListFromTensor",category:"control",inputs:[{start:0,name:"tensor",type:"tensor"},{start:1,name:"elementShape",type:"shape"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListStack",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"},{start:1,name:"elementShape",type:"shape"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"},{tfName:"num_elements",name:"numElements",type:"dtype"}]},{tfOpName:"TensorListSplit",category:"control",inputs:[{start:0,name:"tensor",type:
"tensor"},{start:1,name:"elementShape",type:"shape"},{start:2,name:"lengths",type:"number[]"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListConcat",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"}],attrs:[{tfName:"element_shape",name:"elementShape",type:"shape"},{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListConcatV2",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"}],attrs:[{tfName:"element_shape",name:"elementShape",type:"shape"},{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListPopBack",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"},{start:1,name:"elementShape",type:"shape"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListPushBack",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"},{start:1,name:"tensor",type:"tensor"}],attrs:[{tfName:"element_dtype",name:"elementDType",type:"dtype"}]},{tfOpName:"TensorListLength",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"}]},{tfOpName:"TensorListResize",category:"control",inputs:[{start:0,name:"tensorListId",type:"tensor"},{start:1,name:"size",type:"number"}]}],ld=[{tfOpName:"AvgPool",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0},{tfName:"ksize",name:"kernelSize",type:"number[]"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"MaxPool",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0},{tfName:"ksize",name:"kernelSize",type:"number[]"},{tfName:"explicit_paddings",name:"explicitPadd
ings",type:"number[]",defaultValue:[],notSupported:!0},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"MaxPoolWithArgmax",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"ksize",name:"kernelSize",type:"number[]"},{tfName:"include_batch_in_index",name:"includeBatchInIndex",type:"bool"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"AvgPool3D",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0},{tfName:"ksize",name:"kernelSize",type:"number[]"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"MaxPool3D",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0},{tfName:"ksize",name:"kernelSize",type:"number[]"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Conv1D",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"filter",type:"tensor"}],attrs:[{tfName:"stride",name:"stride",type:"number"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",defaultValue:"NWC"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"dilation",name:"dilation",type:"number",defaultValue:1}]},{tfOpName:"Conv2D",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"filter",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"useCudnnOnGpu",name:"useCudnnOnGpu",type:"bool"},{tfName:"data_format",name
:"dataFormat",type:"string",defaultValue:"NHWC"},{tfName:"explicit_paddings",name:"explicitPaddings",type:"number[]",defaultValue:[]},{tfName:"dilations",name:"dilations",type:"number[]"}]},{tfOpName:"_FusedConv2D",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"filter",type:"tensor"},{start:2,end:0,name:"args",type:"tensors"}],attrs:[{tfName:"num_args",name:"numArgs",type:"number"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"explicit_paddings",name:"explicitPaddings",type:"number[]",defaultValue:[]},{tfName:"use_cudnn_on_gpu",name:"useCudnnOnGpu",type:"bool",defaultValue:!0},{tfName:"data_format",name:"dataFormat",type:"string",defaultValue:"NHWC"},{tfName:"dilations",name:"dilations",type:"number[]",defaultValue:[1,1,1,1]},{tfName:"fused_ops",name:"fusedOps",type:"string[]",defaultValue:[]},{tfName:"epsilon",name:"epsilon",type:"number",defaultValue:1e-4},{tfName:"leakyrelu_alpha",name:"leakyreluAlpha",type:"number",defaultValue:.2}]},{tfOpName:"Conv2DBackpropInput",category:"convolution",inputs:[{start:2,name:"x",type:"tensor"},{start:1,name:"filter",type:"tensor"},{start:0,name:"outputShape",type:"number[]"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0},{tfName:"explicit_paddings",name:"explicitPaddings",type:"number[]",defaultValue:[]},{tfName:"dilations",name:"dilations",type:"number[]",notSupported:!0}]},{tfOpName:"DepthwiseConv2d",category:"convolution",inputs:[{start:0,name:"input",type:"tensor"},{start:1,name:"filter",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",defaultValue:"NHWC"},{tfName:"explicit_paddings",name:"explicitPaddings",type:"number[]",defaultValue:[]}
,{tfName:"dilations",name:"dilations",type:"number[]"}]},{tfOpName:"DepthwiseConv2dNative",category:"convolution",inputs:[{start:0,name:"input",type:"tensor"},{start:1,name:"filter",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",defaultValue:"NHWC"},{tfName:"explicit_paddings",name:"explicitPaddings",type:"number[]",defaultValue:[]},{tfName:"dilations",name:"dilations",type:"number[]"}]},{tfOpName:"FusedDepthwiseConv2dNative",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"filter",type:"tensor"},{start:2,end:0,name:"args",type:"tensors"}],attrs:[{tfName:"num_args",name:"numArgs",type:"number"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",defaultValue:"NHWC"},{tfName:"dilations",name:"dilations",type:"number[]",defaultValue:[1,1,1,1]},{tfName:"fused_ops",name:"fusedOps",type:"string[]",defaultValue:[]},{tfName:"explicit_paddings",name:"explicitPaddings",type:"number[]",defaultValue:[]}]},{tfOpName:"Conv3D",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"filter",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"padding",name:"pad",type:"string"},{tfName:"data_format",name:"dataFormat",type:"string",defaultValue:"NHWC"},{tfName:"dilations",name:"dilations",type:"number[]"}]},{tfOpName:"Dilation2D",category:"convolution",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"filter",type:"tensor"}],attrs:[{tfName:"strides",name:"strides",type:"number[]"},{tfName:"rates",name:"dilations",type:"number[]"},{tfName:"padding",name:"pad",type:"string"}]}],lm=[{tfOpName:"Fill",category:"creation",inputs:[{start:0,name:"shape",type:"number[]"},{start:1,name:"value",type:"number"}],attrs:[{tfName:"T",name:"dt
ype",type:"dtype"}]},{tfOpName:"LinSpace",category:"creation",inputs:[{start:0,name:"start",type:"number"},{start:1,name:"stop",type:"number"},{start:2,name:"num",type:"number"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"OneHot",category:"creation",inputs:[{start:0,name:"indices",type:"tensor"},{start:1,name:"depth",type:"number"},{start:2,name:"onValue",type:"number",defaultValue:1},{start:3,name:"offValue",type:"number",defaultValue:0}],attrs:[{tfName:"axis",name:"axis",type:"number",notSupported:!0},{tfName:"T",name:"dtype",type:"dtype"}]},{tfOpName:"Ones",category:"creation",inputs:[{start:0,name:"shape",type:"number[]"}],attrs:[{tfName:"T",name:"dtype",type:"dtype"}]},{tfOpName:"OnesLike",category:"creation",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"dtype",name:"dtype",type:"dtype"}]},{tfOpName:"RandomStandardNormal",category:"creation",inputs:[{start:0,name:"shape",type:"number[]"}],attrs:[{tfName:"seed",name:"seed",type:"number",defaultValue:0},{tfName:"seed2",name:"seed2",type:"number",defaultValue:0,notSupported:!0},{tfName:"dtype",name:"dtype",type:"dtype"},{tfName:"T",name:"T",type:"number",notSupported:!0}]},{tfOpName:"RandomUniform",category:"creation",inputs:[{start:0,name:"shape",type:"number[]"}],attrs:[{tfName:"minval",name:"minval",type:"number",defaultValue:0},{tfName:"maxval",name:"maxval",type:"number",defaultValue:1},{tfName:"dtype",name:"dtype",type:"dtype"},{tfName:"seed",name:"seed",type:"number",defaultValue:0},{tfName:"seed2",name:"seed2",type:"number",defaultValue:0,notSupported:!0},{tfName:"T",name:"T",type:"number",notSupported:!0}]},{tfOpName:"RandomUniformInt",category:"creation",inputs:[{start:0,name:"shape",type:"number[]"}],attrs:[{tfName:"minval",name:"minval",type:"number"},{tfName:"maxval",name:"maxval",type:"number"},{tfName:"seed",name:"seed",type:"number",defaultValue:0},{tfName:"seed2",name:"seed2",type:"number",defaultValue:0,notSupported:!0}]},{tfOpName:"Range",category:"cre
ation",inputs:[{start:0,name:"start",type:"number"},{start:1,name:"stop",type:"number"},{start:2,name:"step",type:"number",defaultValue:0}],attrs:[{tfName:"Tidx",name:"dtype",type:"dtype"}]},{tfOpName:"TruncatedNormal",category:"creation",inputs:[{start:0,name:"shape",type:"number[]"}],attrs:[{tfName:"means",name:"mean",type:"number",defaultValue:0},{tfName:"stddev",name:"stdDev",type:"number",defaultValue:1},{tfName:"seed",name:"seed",type:"number"},{tfName:"seed2",name:"seed2",type:"number",defaultValue:0,notSupported:!0},{tfName:"dtype",name:"dtype",type:"dtype"},{tfName:"T",name:"T",type:"number",notSupported:!0}]},{tfOpName:"Zeros",category:"creation",inputs:[{start:0,name:"shape",type:"number[]"}],attrs:[{tfName:"T",name:"dtype",type:"dtype"}]},{tfOpName:"ZerosLike",category:"creation",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype"}]},{tfOpName:"Multinomial",category:"creation",inputs:[{start:0,name:"logits",type:"tensor"},{start:1,name:"numSamples",type:"number"}],attrs:[{tfName:"seed",name:"seed",type:"number"},{tfName:"seed2",name:"seed2",type:"number"},{tfName:"T",name:"dtype",type:"dtype"},{tfName:"output_dtype",name:"output_dtype",type:"dtype"}]}],lf=[{tfOpName:"NonMaxSuppressionV2",category:"dynamic",inputs:[{start:0,name:"boxes",type:"tensor"},{start:1,name:"scores",type:"tensor"},{start:2,name:"maxOutputSize",type:"number"},{start:3,name:"iouThreshold",type:"number"}]},{tfOpName:"NonMaxSuppressionV3",category:"dynamic",inputs:[{start:0,name:"boxes",type:"tensor"},{start:1,name:"scores",type:"tensor"},{start:2,name:"maxOutputSize",type:"number"},{start:3,name:"iouThreshold",type:"number"},{start:4,name:"scoreThreshold",type:"number"}]},{tfOpName:"NonMaxSuppressionV4",category:"dynamic",inputs:[{start:0,name:"boxes",type:"tensor"},{start:1,name:"scores",type:"tensor"},{start:2,name:"maxOutputSize",type:"number"},{start:3,name:"iouThreshold",type:"number"},{start:4,name:"scoreThreshold",type:"number"}],attrs:[{tfNa
me:"T",name:"dtype",type:"dtype",notSupported:!0},{tfName:"T_threshold",name:"threshold",type:"dtype",notSupported:!0},{tfName:"pad_to_max_output_size",name:"padToMaxOutputSize",type:"bool"}]},{tfOpName:"NonMaxSuppressionV5",category:"dynamic",inputs:[{start:0,name:"boxes",type:"tensor"},{start:1,name:"scores",type:"tensor"},{start:2,name:"maxOutputSize",type:"number"},{start:3,name:"iouThreshold",type:"number"},{start:4,name:"scoreThreshold",type:"number"},{start:5,name:"softNmsSigma",type:"number"}]},{tfOpName:"Where",category:"dynamic",inputs:[{start:0,name:"condition",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"ListDiff",category:"dynamic",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"y",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]}],lg=[{tfOpName:"LowerBound",category:"evaluation",inputs:[{start:0,name:"sortedSequence",type:"tensor"},{start:1,name:"values",type:"tensor"}]},{tfOpName:"TopKV2",category:"evaluation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"k",type:"number"}],attrs:[{tfName:"sorted",name:"sorted",type:"bool"}]},{tfOpName:"UpperBound",category:"evaluation",inputs:[{start:0,name:"sortedSequence",type:"tensor"},{start:1,name:"values",type:"tensor"}]},{tfOpName:"Unique",category:"evaluation",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"UniqueV2",category:"evaluation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number"}]}],ly=[{tfOpName:"PlaceholderWithDefault",category:"graph",inputs:[{start:0,name:"default",type:"tensor"}],attrs:[{tfName:"shape",name:"shape",type:"shape"},{tfName:"dtype",name:"dtype",type:"dtype"}]},{tfOpName:"Placeholder",category:"graph",attrs:[{tfName:"shape",name:"shape",type:"shape"},{tfName:"dtype",name:"dtype",type:"dtype"}]},{tfOpName:"Const",category:"graph"},{tfOpName:"Identity",category:"graph",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"IdentityN",category:"graph",inputs:[{sta
rt:0,end:0,name:"x",type:"tensors"}]},{tfOpName:"Snapshot",category:"graph",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"Rank",category:"graph",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"Size",category:"graph",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"Shape",category:"graph",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"ShapeN",category:"graph",inputs:[{start:0,end:0,name:"x",type:"tensors"}]},{tfOpName:"Print",category:"graph",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"data",type:"tensors"}],attrs:[{tfName:"message",name:"message",type:"string"},{tfName:"first_n",name:"firstN",type:"number",notSupported:!0},{tfName:"summarize",name:"summarize",type:"number",defaultValue:3}]},{tfOpName:"NoOp",category:"graph",inputs:[]},{tfOpName:"StopGradient",category:"graph",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"FakeQuantWithMinMaxVars",category:"graph",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"min",name:"min",type:"number"},{tfName:"max",name:"max",type:"number"}]}],lx=[{tfOpName:"HashTable",category:"hash_table",inputs:[],attrs:[{tfName:"shared_name",name:"sharedName",type:"string"},{tfName:"use_node_name_sharing",name:"useNodeNameSharing",type:"bool"},{tfName:"key_dtype",name:"keyDType",type:"dtype"},{tfName:"value_dtype",name:"valueDType",type:"dtype"}]},{tfOpName:"HashTableV2",category:"hash_table",inputs:[],attrs:[{tfName:"shared_name",name:"sharedName",type:"string"},{tfName:"use_node_name_sharing",name:"useNodeNameSharing",type:"bool"},{tfName:"key_dtype",name:"keyDType",type:"dtype"},{tfName:"value_dtype",name:"valueDType",type:"dtype"}]},{tfOpName:"LookupTableImport",category:"hash_table",inputs:[{start:0,name:"tableHandle",type:"tensor"},{start:1,name:"keys",type:"tensor"},{start:2,name:"values",type:"tensor"}],attrs:[{tfName:"Tin",name:"tIn",type:"dtype",notSupported:!0},{tfName:"Tout",name:"tOut",type:"dtype",notSupported:!0}]},{tfOpName:"LookupTableImportV2",category:"hash_table",in
puts:[{start:0,name:"tableHandle",type:"tensor"},{start:1,name:"keys",type:"tensor"},{start:2,name:"values",type:"tensor"}],attrs:[{tfName:"Tin",name:"tIn",type:"dtype",notSupported:!0},{tfName:"Tout",name:"tOut",type:"dtype",notSupported:!0}]},{tfOpName:"LookupTableFind",category:"hash_table",inputs:[{start:0,name:"tableHandle",type:"tensor"},{start:1,name:"keys",type:"tensor"},{start:2,name:"defaultValue",type:"tensor"}],attrs:[{tfName:"Tin",name:"tIn",type:"dtype",notSupported:!0},{tfName:"Tout",name:"tOut",type:"dtype",notSupported:!0}]},{tfOpName:"LookupTableFindV2",category:"hash_table",inputs:[{start:0,name:"tableHandle",type:"tensor"},{start:1,name:"keys",type:"tensor"},{start:2,name:"defaultValue",type:"tensor"}],attrs:[{tfName:"Tin",name:"tIn",type:"dtype",notSupported:!0},{tfName:"Tout",name:"tOut",type:"dtype",notSupported:!0}]},{tfOpName:"LookupTableSize",category:"hash_table",inputs:[{start:0,name:"tableHandle",type:"tensor"}]},{tfOpName:"LookupTableSizeV2",category:"hash_table",inputs:[{start:0,name:"tableHandle",type:"tensor"}]},{tfOpName:"InitializeTable",category:"hash_table",inputs:[{start:0,name:"tableHandle",type:"tensor"},{start:1,name:"keys",type:"tensor"},{start:2,name:"values",type:"tensor"}]},{tfOpName:"InitializeTableV2",category:"hash_table",inputs:[{start:0,name:"tableHandle",type:"tensor"},{start:1,name:"keys",type:"tensor"},{start:2,name:"values",type:"tensor"}]}],lv=[{tfOpName:"ResizeBilinear",category:"image",inputs:[{start:0,name:"images",type:"tensor"},{start:1,name:"size",type:"number[]"}],attrs:[{tfName:"align_corners",name:"alignCorners",type:"bool"},{tfName:"half_pixel_centers",name:"halfPixelCenters",type:"bool"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"ResizeNearestNeighbor",category:"image",inputs:[{start:0,name:"images",type:"tensor"},{start:1,name:"size",type:"number[]"}],attrs:[{tfName:"align_corners",name:"alignCorners",type:"bool"},{tfName:"half_pixel_centers",name:"halfPixelCenters",type:"boo
l"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"CropAndResize",category:"image",inputs:[{start:0,name:"image",type:"tensor"},{start:1,name:"boxes",type:"tensor"},{start:2,name:"boxInd",type:"tensor"},{start:3,name:"cropSize",type:"number[]"}],attrs:[{tfName:"method",name:"method",type:"string"},{tfName:"extrapolation_value",name:"extrapolationValue",type:"number"}]},{tfOpName:"ImageProjectiveTransformV3",category:"image",inputs:[{start:0,name:"images",type:"tensor"},{start:1,name:"transforms",type:"tensor"},{start:2,name:"outputShape",type:"number[]"},{start:3,name:"fillValue",type:"number"}],attrs:[{tfName:"interpolation",name:"interpolation",type:"string"},{tfName:"fill_mode",name:"fillMode",type:"string"}]}],l_=[{tfOpName:"Equal",category:"logical",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"NotEqual",category:"logical",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Greater",category:"logical",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"GreaterEqual",category:"logical",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Less",category:"logical",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"LessEqual",category:"logical",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"LogicalAnd",category:"logical",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]}
,{tfOpName:"LogicalNot",category:"logical",inputs:[{start:0,name:"a",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"LogicalOr",category:"logical",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Select",category:"logical",inputs:[{start:0,name:"condition",type:"tensor"},{start:1,name:"a",type:"tensor"},{start:2,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"SelectV2",category:"logical",inputs:[{start:0,name:"condition",type:"tensor"},{start:1,name:"a",type:"tensor"},{start:2,name:"b",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"BitwiseAnd",category:"logical",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"y",type:"tensor"}]}],lT=[{tfOpName:"_FusedMatMul",category:"matrices",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"},{start:2,end:0,name:"args",type:"tensors"}],attrs:[{tfName:"num_args",name:"numArgs",type:"number"},{tfName:"fused_ops",name:"fusedOps",type:"string[]",defaultValue:[]},{tfName:"epsilon",name:"epsilon",type:"number",defaultValue:1e-4},{tfName:"transpose_a",name:"transposeA",type:"bool",defaultValue:!1},{tfName:"transpose_b",name:"transposeB",type:"bool",defaultValue:!1},{tfName:"leakyrelu_alpha",name:"leakyreluAlpha",type:"number",defaultValue:.2},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"MatMul",category:"matrices",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"transpose_a",name:"transposeA",type:"bool",defaultValue:!1},{tfName:"transpose_b",name:"transposeB",type:"bool",defaultValue:!1},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"BatchMatMul",category:"matrices",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"adj_x",name:"transposeA",type:"
bool",defaultValue:!1},{tfName:"adj_y",name:"transposeB",type:"bool",defaultValue:!1},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"BatchMatMulV2",category:"matrices",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"b",type:"tensor"}],attrs:[{tfName:"adj_x",name:"transposeA",type:"bool",defaultValue:!1},{tfName:"adj_y",name:"transposeB",type:"bool",defaultValue:!1},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Transpose",category:"matrices",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"perm",type:"number[]"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Einsum",category:"matrices",inputs:[{start:0,end:0,name:"tensors",type:"tensors"}],attrs:[{tfName:"equation",name:"equation",type:"string"},{tfName:"N",name:"n",type:"number",defaultValue:2},{tfName:"T",name:"dtype",type:"dtype"}]},{tfOpName:"MatrixBandPart",category:"matrices",inputs:[{start:0,name:"a",type:"tensor"},{start:1,name:"numLower",type:"tensor"},{start:1,name:"numUpper",type:"tensor"}]}],lk=[{tfOpName:"EuclideanNorm",category:"normalization",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}],attrs:[{tfName:"keep_dims",name:"keepDims",type:"bool",defaultValue:!1}]},{tfOpName:"FusedBatchNorm",category:"normalization",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"scale",type:"tensor"},{start:2,name:"offset",type:"tensor"},{start:3,name:"mean",type:"tensor"},{start:4,name:"variance",type:"tensor"}],attrs:[{tfName:"epsilon",name:"epsilon",type:"number",defaultValue:.001},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0}]},{tfOpName:"FusedBatchNormV2",category:"normalization",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"scale",type:"tensor"},{start:2,name:"offset",type:"tensor"},{start:3,name:"mean",type:"tensor"},{start:4,name:"variance",type:"tensor"}],attrs:[{tfName:"epsilon",name:"epsilon",type:"number",defaultValue:.001},{tfName:"data_format",name
:"dataFormat",type:"string",notSupported:!0}]},{tfOpName:"FusedBatchNormV3",category:"normalization",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"scale",type:"tensor"},{start:2,name:"offset",type:"tensor"},{start:3,name:"mean",type:"tensor"},{start:4,name:"variance",type:"tensor"}],attrs:[{tfName:"epsilon",name:"epsilon",type:"number",defaultValue:.001},{tfName:"data_format",name:"dataFormat",type:"string",notSupported:!0}]},{tfOpName:"LRN",category:"normalization",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"depth_radius",name:"radius",type:"number",defaultValue:5},{tfName:"bias",name:"bias",type:"number",defaultValue:1},{tfName:"alpha",name:"alpha",type:"number",defaultValue:1},{tfName:"beta",name:"beta",type:"number",defaultValue:.5}]},{tfOpName:"Softmax",category:"normalization",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"LogSoftmax",category:"normalization",inputs:[{start:0,name:"x",type:"tensor"}]}],lS=[{tfOpName:"Bincount",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"size",type:"number"},{start:2,name:"weights",type:"tensor"}]},{tfOpName:"DenseBincount",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"size",type:"number"},{start:2,name:"weights",type:"tensor"}],attrs:[{tfName:"binary_output",name:"binaryOutput",type:"bool"}]},{tfOpName:"Max",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}],attrs:[{tfName:"keep_dims",name:"keepDims",type:"bool"}]},{tfOpName:"Mean",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}],attrs:[{tfName:"keep_dims",name:"keepDims",type:"bool"}]},{tfOpName:"Min",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}],attrs:[{tfName:"keep_dims",name:"keepDims",type:"bool"}]},{tfOpName:"Sum",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}],attrs:[{tf
Name:"keep_dims",name:"keepDims",type:"bool"}]},{tfOpName:"All",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}],attrs:[{tfName:"keep_dims",name:"keepDims",type:"bool"}]},{tfOpName:"Any",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}],attrs:[{tfName:"keep_dims",name:"keepDims",type:"bool"}]},{tfOpName:"ArgMax",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number"}]},{tfOpName:"ArgMin",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number"}]},{tfOpName:"Prod",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}],attrs:[{tfName:"keep_dims",name:"keepDims",type:"bool"},{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"Cumprod",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number"}],attrs:[{tfName:"exclusive",name:"exclusive",type:"bool"},{tfName:"reverse",name:"reverse",type:"bool"}]},{tfOpName:"Cumsum",category:"reduction",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number"}],attrs:[{tfName:"exclusive",name:"exclusive",type:"bool"},{tfName:"reverse",name:"reverse",type:"bool"}]}],lw=[{tfOpName:"ConcatV2",category:"slice_join",inputs:[{start:0,end:-1,name:"tensors",type:"tensors"},{start:-1,name:"axis",type:"number"}],attrs:[{tfName:"N",name:"n",type:"number",defaultValue:2}]},{tfOpName:"Concat",category:"slice_join",inputs:[{start:1,end:0,name:"tensors",type:"tensors"},{start:0,name:"axis",type:"number"}],attrs:[{tfName:"N",name:"n",type:"number",defaultValue:2}]},{tfOpName:"GatherV2",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"indices",type:"tensor"},{start:2,name:"axis",type:"number",defaultValue:0}],attrs:[{tfName:"batch_dims",name:"batchDims",type:"number",defaultValue:0}]},{tfOpName:"Gather",category:"slice_join",
inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"indices",type:"tensor"}],attrs:[{tfName:"validate_indices",name:"validateIndices",type:"bool",notSupported:!0}]},{tfOpName:"Reverse",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"dims",type:"bool[]"}]},{tfOpName:"ReverseV2",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number[]"}]},{tfOpName:"Slice",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"begin",type:"number[]"},{start:2,name:"size",type:"number[]"}]},{tfOpName:"StridedSlice",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"begin",type:"number[]"},{start:2,name:"end",type:"number[]"},{start:3,name:"strides",type:"number[]"}],attrs:[{tfName:"begin_mask",name:"beginMask",type:"number",defaultValue:0},{tfName:"end_mask",name:"endMask",type:"number",defaultValue:0},{tfName:"new_axis_mask",name:"newAxisMask",type:"number",defaultValue:0},{tfName:"ellipsis_mask",name:"ellipsisMask",type:"number",defaultValue:0},{tfName:"shrink_axis_mask",name:"shrinkAxisMask",type:"number",defaultValue:0}]},{tfOpName:"Pack",category:"slice_join",inputs:[{start:0,end:0,name:"tensors",type:"tensors"}],attrs:[{tfName:"axis",name:"axis",type:"number",defaultValue:0}]},{tfOpName:"Unpack",category:"slice_join",inputs:[{start:0,name:"tensor",type:"tensor"}],attrs:[{tfName:"axis",name:"axis",type:"number",defaultValue:0},{tfName:"num",name:"num",type:"number",defaultValue:0,notSupported:!0}]},{tfOpName:"Tile",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"reps",type:"number[]"}]},{tfOpName:"Split",category:"slice_join",inputs:[{start:0,name:"axis",type:"number",defaultValue:0},{start:1,name:"x",type:"tensor"}],attrs:[{tfName:"num_split",name:"numOrSizeSplits",type:"number",defaultValue:1}]},{tfOpName:"SplitV",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"numOrSizeSplits",type:"number[]"},{
start:2,name:"axis",type:"number",defaultValue:0}]},{tfOpName:"ScatterNd",category:"slice_join",inputs:[{start:0,name:"indices",type:"tensor"},{start:1,name:"values",type:"tensor"},{start:2,name:"shape",type:"number[]"}]},{tfOpName:"GatherNd",category:"slice_join",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"indices",type:"tensor"}]},{tfOpName:"SparseToDense",category:"slice_join",inputs:[{start:0,name:"sparseIndices",type:"tensor"},{start:1,name:"outputShape",type:"number[]"},{start:2,name:"sparseValues",type:"tensor"},{start:3,name:"defaultValue",type:"tensor"}],attrs:[{tfName:"validate_indices",name:"validateIndices",type:"bool",defaultValue:!1,notSupported:!0}]},{tfOpName:"TensorScatterUpdate",category:"slice_join",inputs:[{start:0,name:"tensor",type:"tensor"},{start:1,name:"indices",type:"tensor"},{start:2,name:"values",type:"tensor"}]}],lI=[{tfOpName:"SparseFillEmptyRows",category:"sparse",inputs:[{start:0,name:"indices",type:"tensor"},{start:1,name:"values",type:"tensor"},{start:2,name:"denseShape",type:"tensor"},{start:3,name:"defaultValue",type:"tensor"}]},{tfOpName:"SparseReshape",category:"sparse",inputs:[{start:0,name:"inputIndices",type:"tensor"},{start:1,name:"inputShape",type:"tensor"},{start:2,name:"newShape",type:"tensor"}],attrs:[{tfName:"T",name:"dtype",type:"dtype",notSupported:!0}]},{tfOpName:"SparseSegmentMean",category:"sparse",inputs:[{start:0,name:"data",type:"tensor"},{start:1,name:"indices",type:"tensor"},{start:2,name:"segmentIds",type:"tensor"}]},{tfOpName:"SparseSegmentSum",category:"sparse",inputs:[{start:0,name:"data",type:"tensor"},{start:1,name:"indices",type:"tensor"},{start:2,name:"segmentIds",type:"tensor"}]}],lN=[{tfOpName:"FFT",category:"spectral",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"IFFT",category:"spectral",inputs:[{start:0,name:"x",type:"tensor"}]},{tfOpName:"RFFT",category:"spectral",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"fft_length",type:"number",notSupported:!0}]},{tfOpName:"IR
FFT",category:"spectral",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"fft_length",type:"number",notSupported:!0}]}],lC=[{tfOpName:"StaticRegexReplace",category:"string",inputs:[{start:0,name:"input",type:"tensor"}],attrs:[{tfName:"pattern",name:"pattern",type:"string"},{tfName:"rewrite",name:"rewrite",type:"string"},{tfName:"replace_global",name:"replaceGlobal",type:"bool"}]},{tfOpName:"StringNGrams",category:"string",inputs:[{start:0,name:"data",type:"tensor"},{start:1,name:"dataSplits",type:"tensor"}],attrs:[{tfName:"separator",name:"separator",type:"string"},{tfName:"ngram_widths",name:"nGramWidths",type:"number[]"},{tfName:"left_pad",name:"leftPad",type:"string"},{tfName:"right_pad",name:"rightPad",type:"string"},{tfName:"pad_width",name:"padWidth",type:"number"},{tfName:"preserve_short_sequences",name:"preserveShortSequences",type:"bool"}],outputs:["ngrams","ngrams_splits"]},{tfOpName:"StringSplit",category:"string",inputs:[{start:0,name:"input",type:"tensor"},{start:1,name:"delimiter",type:"tensor"}],attrs:[{tfName:"skip_empty",name:"skipEmpty",type:"bool"}],outputs:["indices","values","shape"]},{tfOpName:"StringToHashBucketFast",category:"string",inputs:[{start:0,name:"input",type:"tensor"}],attrs:[{tfName:"num_buckets",name:"numBuckets",type:"number"}]}],lE=[{tfOpName:"Cast",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"SrcT",name:"sdtype",type:"dtype",notSupported:!0},{tfName:"DstT",name:"dtype",type:"dtype"}]},{tfOpName:"ExpandDims",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"axis",type:"number"}]},{tfOpName:"MirrorPad",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"padding",type:"number[]"}],attrs:[{tfName:"mode",name:"mode",type:"string"}]},{tfOpName:"Pad",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"padding",type:"number[]"}],attrs:[{tfName:"constant_value",name:"constantValue",type:"number",defau
ltValue:0}]},{tfOpName:"PadV2",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"padding",type:"number[]"},{start:2,name:"constantValue",type:"number",defaultValue:0}]},{tfOpName:"Reshape",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"shape",type:"number[]"}]},{tfOpName:"EnsureShape",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"shape",type:"number[]"}]},{tfOpName:"Squeeze",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"axis",tfDeprecatedName:"squeeze_dims",name:"axis",type:"number[]"}]},{tfOpName:"SpaceToBatchND",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"blockShape",type:"number[]"},{start:2,name:"paddings",type:"number[]"}]},{tfOpName:"BatchToSpaceND",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"blockShape",type:"number[]"},{start:2,name:"crops",type:"number[]"}]},{tfOpName:"DepthToSpace",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"}],attrs:[{tfName:"block_size",name:"blockSize",type:"number"},{tfName:"data_format",name:"dataFormat",type:"string"}]},{tfOpName:"BroadcastTo",category:"transformation",inputs:[{start:0,name:"x",type:"tensor"},{start:1,name:"shape",type:"number[]"}],attrs:[]},{tfOpName:"BroadcastArgs",category:"transformation",inputs:[{start:0,name:"s0",type:"tensor"},{start:1,name:"s1",type:"tensor"}],attrs:[]}];let OperationMapper=class OperationMapper{static get Instance(){return this._instance||(this._instance=new this)}constructor(){const t=[].concat(...[ey,ex,ev,e_,eT,ek,eS,ew,eI,eN,eC,eE,eA,e$,eR,eF,eD,eP,eO].map(t=>t.json));this.opMappers=t.reduce((t,r)=>(t[r.tfOpName]=r,t),{})}transformGraph(t,r={}){let 
a=t.node,n=[],s=[],i=[],o=a.reduce((t,r)=>(t[r.name]=this.mapNode(r),r.op.startsWith("Placeholder")?n.push(t[r.name]):"Const"===r.op?s.push(t[r.name]):(null==r.input||0===r.input.length)&&i.push(t[r.name]),t),{}),l=[],u=[],p={},m={};null!=r&&(p=this.mapSignatureEntries(r.inputs),m=this.mapSignatureEntries(r.outputs));let y=Object.keys(o);y.forEach(t=>{let r=o[t];r.inputNames.forEach((t,a)=>{let[n,,s]=getNodeNameAndIndex(t),i=o[n];if(null!=i.outputs){let t=i.outputs.indexOf(s);if(-1!==t){let s=`${n}:${t}`;r.inputNames[a]=s}}r.inputs.push(i),i.children.push(r)})}),0===Object.keys(m).length?y.forEach(t=>{let r=o[t];0===r.children.length&&u.push(r)}):Object.keys(m).forEach(t=>{let[r]=getNodeNameAndIndex(t),a=o[r];null!=a&&(a.signatureKey=m[t],u.push(a))}),Object.keys(p).length>0?Object.keys(p).forEach(t=>{let[r]=getNodeNameAndIndex(t),a=o[r];a&&(a.signatureKey=p[t],l.push(a))}):l=n;let _={};null!=t.library&&null!=t.library.function&&(_=t.library.function.reduce((t,r)=>(t[r.signature.name]=this.mapFunction(r),t),{}));let w={nodes:o,inputs:l,outputs:u,weights:s,placeholders:n,signature:r,functions:_};return i.length>0&&(w.initNodes=i),w}mapSignatureEntries(t){return Object.keys(t||{}).reduce((r,a)=>(r[t[a].name]=a,r),{})}mapNode(t){let r=lu[t.op]||this.opMappers[t.op]||{};null==t.attr&&(t.attr={});let a={name:t.name,op:t.op,category:r.category,inputNames:(t.input||[]).map(t=>t.startsWith("^")?t.slice(1):t),inputs:[],children:[],inputParams:{},attrParams:{},rawAttrs:t.attr,outputs:r.outputs};return null!=r.inputs&&(a.inputParams=r.inputs.reduce((t,r)=>(t[r.name]={type:r.type,inputIndexStart:r.start,inputIndexEnd:r.end},t),{})),null!=r.attrs&&(a.attrParams=r.attrs.reduce((r,a)=>{let n,s=a.type;switch(a.type){case"string":void 0===(n=getStringParam(t.attr,a.tfName,a.defaultValue))&&a.tfDeprecatedName&&(n=getStringParam(t.attr,a.tfDeprecatedName,a.defaultValue));break;case"string[]":void 
0===(n=getStringArrayParam(t.attr,a.tfName,a.defaultValue))&&a.tfDeprecatedName&&(n=getStringArrayParam(t.attr,a.tfDeprecatedName,a.defaultValue));break;case"number":void 0===(n=getNumberParam(t.attr,a.tfName,a.defaultValue||0))&&a.tfDeprecatedName&&(n=getNumberParam(t.attr,a.tfDeprecatedName,a.defaultValue));break;case"number[]":void 0===(n=getNumericArrayParam(t.attr,a.tfName,a.defaultValue))&&a.tfDeprecatedName&&(n=getNumericArrayParam(t.attr,a.tfDeprecatedName,a.defaultValue));break;case"bool":void 0===(n=getBoolParam(t.attr,a.tfName,a.defaultValue))&&a.tfDeprecatedName&&(n=getBoolParam(t.attr,a.tfDeprecatedName,a.defaultValue));break;case"bool[]":void 0===(n=getBoolArrayParam(t.attr,a.tfName,a.defaultValue))&&a.tfDeprecatedName&&(n=getBoolArrayParam(t.attr,a.tfDeprecatedName,a.defaultValue));break;case"shape":void 0===(n=getTensorShapeParam(t.attr,a.tfName,a.defaultValue))&&a.tfDeprecatedName&&(n=getTensorShapeParam(t.attr,a.tfDeprecatedName,a.defaultValue));break;case"shape[]":void 0===(n=getTensorShapeArrayParam(t.attr,a.tfName,a.defaultValue))&&a.tfDeprecatedName&&(n=getTensorShapeArrayParam(t.attr,a.tfDeprecatedName,a.defaultValue));break;case"dtype":void 0===(n=getDtypeParam(t.attr,a.tfName,a.defaultValue))&&a.tfDeprecatedName&&(n=getDtypeParam(t.attr,a.tfDeprecatedName,a.defaultValue));break;case"dtype[]":void 0===(n=getDtypeArrayParam(t.attr,a.tfName,a.defaultValue))&&a.tfDeprecatedName&&(n=getDtypeArrayParam(t.attr,a.tfDeprecatedName,a.defaultValue));break;case"func":void 0===(n=getFuncParam(t.attr,a.tfName,a.defaultValue))&&a.tfDeprecatedName&&(n=getFuncParam(t.attr,a.tfDeprecatedName,a.defaultValue));break;case"tensor":case"tensors":break;default:throw Error(`Unsupported param type: ${a.type} for op: ${t.op}`)}return r[a.name]={value:n,type:s},r},{})),a}mapFunction(t){let r=t.nodeDef,a=[],n={};null!=r&&(n=r.reduce((t,r)=>(t[r.name]=this.mapNode(r),"Const"===r.op&&a.push(t[r.name]),t),{}));let 
s=[],i=[];t.signature.inputArg.forEach(t=>{let[r]=getNodeNameAndIndex(t.name),a={name:r,op:"Placeholder",inputs:[],inputNames:[],category:"graph",inputParams:{},attrParams:{dtype:{value:parseDtypeParam(t.type),type:"dtype"}},children:[]};a.signatureKey=t.name,s.push(a),n[r]=a}),Object.keys(n).forEach(t=>{let r=n[t];r.inputNames.forEach((t,a)=>{let[s,,i]=getNodeNameAndIndex(t),o=n[s];if(null!=o.outputs){let t=o.outputs.indexOf(i);if(-1!==t){let n=`${s}:${t}`;r.inputNames[a]=n}}r.inputs.push(o),o.children.push(r)})});let o=t.ret;t.signature.outputArg.forEach(t=>{let[r,a]=getNodeNameAndIndex(o[t.name]),s=n[r];null!=s&&(s.defaultOutput=a,i.push(s))});let l=this.mapArgsToSignature(t);return{nodes:n,inputs:s,outputs:i,weights:a,placeholders:[],signature:l}}mapArgsToSignature(t){return{methodName:t.signature.name,inputs:t.signature.inputArg.reduce((t,r)=>(t[r.name]=this.mapArgToTensorInfo(r),t),{}),outputs:t.signature.outputArg.reduce((r,a)=>(r[a.name]=this.mapArgToTensorInfo(a,t.ret),r),{})}}mapArgToTensorInfo(t,r){let a=t.name;return null!=r&&(a=r[a]),{name:a,dtype:t.type}}};function decodeBase64(t){let r=eV.global;if(void 0!==r.atob)return r.atob(t);if("u">typeof Buffer)return new Buffer(t,"base64").toString();throw Error("Unable to decode base64 in this environment. 
Missing built-in atob() or Buffer()")}function parseStringParam(t,r){let a=Array.isArray(t)?String.fromCharCode.apply(null,t):decodeBase64(t);return r?a:a.toLowerCase()}function getStringParam(t,r,a,n=!1){let s=t[r];return null!=s?parseStringParam(s.s,n):a}function getBoolParam(t,r,a){let n=t[r];return n?n.b:a}function getNumberParam(t,r,a){let n=t[r]||{},s=null!=n.i?n.i:null!=n.f?n.f:a;return"number"==typeof s?s:parseInt(s,10)}function parseDtypeParam(t){switch("string"==typeof t&&(t=en[t]),t){case en.DT_FLOAT:case en.DT_HALF:return"float32";case en.DT_INT32:case en.DT_INT64:case en.DT_INT8:case en.DT_UINT8:return"int32";case en.DT_BOOL:return"bool";case en.DT_DOUBLE:return"float32";case en.DT_STRING:return"string";case en.DT_COMPLEX64:case en.DT_COMPLEX128:return"complex64";default:return null}}function getFuncParam(t,r,a){let n=t[r];return n&&n.func?n.func.name:a}function getDtypeParam(t,r,a){let n=t[r];return n&&n.type?parseDtypeParam(n.type):a}function getDtypeArrayParam(t,r,a){let n=t[r];return n&&n.list&&n.list.type?n.list.type.map(t=>parseDtypeParam(t)):a}function parseTensorShapeParam(t){if(!t.unknownRank)return null!=t.dim?t.dim.map(t=>"number"==typeof t.size?t.size:parseInt(t.size,10)):[]}function getTensorShapeParam(t,r,a){let n=t[r];return n&&n.shape?parseTensorShapeParam(n.shape):a}function getNumericArrayParam(t,r,a){let n=t[r];return n?((n.list.f&&n.list.f.length?n.list.f:n.list.i)||[]).map(t=>"number"==typeof t?t:parseInt(t,10)):a}function getStringArrayParam(t,r,a,n=!1){let s=t[r];return s&&s.list&&s.list.s?s.list.s.map(t=>parseStringParam(t,n)):a}function getTensorShapeArrayParam(t,r,a){let n=t[r];return n&&n.list&&n.list.shape?n.list.shape.map(t=>parseTensorShapeParam(t)):a}function getBoolArrayParam(t,r,a){let n=t[r];return n&&n.list&&n.list.b?n.list.b:a}let NodeValueImpl=class 
NodeValueImpl{constructor(t,r,a){this.node=t,this.tensorMap=r,this.context=a,this.inputs=[],this.attrs={},this.inputs=t.inputNames.map(t=>this.getInput(t)),null!=t.rawAttrs&&(this.attrs=Object.keys(t.rawAttrs).reduce((t,r)=>(t[r]=this.getAttr(r),t),{}))}getInput(t){return getTensor(t,this.tensorMap,this.context)}getAttr(t,r){let a=this.node.rawAttrs[t];if(null!=a.tensor)return getTensor(t,this.tensorMap,this.context);if(null!=a.i||null!=a.f)return getNumberParam(this.node.rawAttrs,t,r);if(null!=a.s)return getStringParam(this.node.rawAttrs,t,r);if(null!=a.b)return getBoolParam(this.node.rawAttrs,t,r);if(null!=a.shape)return getTensorShapeParam(this.node.rawAttrs,t,r);if(null!=a.type)return getDtypeParam(this.node.rawAttrs,t,r);if(null!=a.list){if(null!=a.list.i||null!=a.list.f)return getNumericArrayParam(this.node.rawAttrs,t,r);if(null!=a.list.s)return getStringArrayParam(this.node.rawAttrs,t,r);if(null!=a.list.shape)return getTensorShapeArrayParam(this.node.rawAttrs,t,r);if(null!=a.list.b)return getBoolArrayParam(this.node.rawAttrs,t,r);if(null!=a.list.type)return getDtypeArrayParam(this.node.rawAttrs,t,r)}return r}};function assertShapesMatchAllowUndefinedSize(t,r,a=""){if("number"!=typeof t&&"number"!=typeof r){assert(t.length===r.length,()=>a+` Shapes ${t} and ${r} must match`);for(let n=0;na+` Shapes ${t} and ${r} must match`)}}}function fullDefinedShape(t){return!("number"==typeof t||t.some(t=>t<0))}function inferElementShape(t,r,a){let n=mergeElementShape(t,a),s=!fullDefinedShape(n);if(s&&0===r.length)throw Error(`Tried to calculate elements of an empty list with non-fully-defined elementShape: ${n}`);if(s&&r.forEach(t=>{n=mergeElementShape(t.shape,n)}),!fullDefinedShape(n))throw Error(`Non-fully-defined elementShape: ${n}`);return n}function mergeElementShape(t,r){if("number"==typeof t)return r;if("number"==typeof r)return t;if(t.length!==r.length)throw Error(`Incompatible ranks during merge: ${t} vs. 
${r}`);let a=[];for(let n=0;n=0&&i>=0&&s!==i)throw Error(`Incompatible shape during merge: ${t} vs. ${r}`);a[n]=s>=0?s:i}return a}let TensorArray=class TensorArray{constructor(t,r,a,n,s,i,o){this.name=t,this.dtype=r,this.maxSize=a,this.elementShape=n,this.identicalElementShapes=s,this.dynamicSize=i,this.clearAfterRead=o,this.tensors=[],this.closed_=!1,this.idTensor=scalar_scalar(0),keep(this.idTensor)}get id(){return this.idTensor.id}get closed(){return this.closed_}clearAndClose(t){this.tensors.forEach(r=>{null!=t&&t.has(r.tensor.id)||r.tensor.dispose()}),this.tensors=[],this.closed_=!0,this.idTensor.dispose()}size(){return this.tensors.length}read(t){if(this.closed_)throw Error(`TensorArray ${this.name} has already been closed.`);if(t<0||t>=this.size())throw Error(`Tried to read from index ${t}, but array size is: ${this.size()}`);let r=this.tensors[t];if(r.cleared)throw Error(`TensorArray ${this.name}: Could not read index ${t} twice because it was cleared after a previous read (perhaps try setting clear_after_read = false?).`);return this.clearAfterRead&&(r.cleared=!0),r.read=!0,r.tensor}readMany(t){return t.map(t=>this.read(t))}write(t,r){if(this.closed_)throw Error(`TensorArray ${this.name} has already been closed.`);if(t<0||!this.dynamicSize&&t>=this.maxSize)throw Error(`Tried to write to index ${t}, but array is not resizeable and size is: ${this.maxSize}`);let a=this.tensors[t]||{};if(r.dtype!==this.dtype)throw Error(`TensorArray ${this.name}: Could not write to TensorArray index ${t}, because the value dtype is ${r.dtype}, but TensorArray dtype is ${this.dtype}.`);if(0===this.size()&&(null==this.elementShape||0===this.elementShape.length)&&(this.elementShape=r.shape),assertShapesMatchAllowUndefinedSize(this.elementShape,r.shape,`TensorArray ${this.name}: Could not write to TensorArray index ${t}.`),a.read)throw Error(`TensorArray ${this.name}: Could not write to TensorArray index ${t}, because it has already been read.`);if(a.written)throw 
Error(`TensorArray ${this.name}: Could not write to TensorArray index ${t}, because it has already been written.`);a.tensor=r,keep(r),a.written=!0,this.tensors[t]=a}writeMany(t,r){if(t.length!==r.length)throw Error(`TensorArray ${this.name}: could not write multiple tensors,because the index size: ${t.length} is not the same as tensors size: ${r.length}.`);t.forEach((t,a)=>this.write(t,r[a]))}gather(t,r){if(r&&r!==this.dtype)throw Error(`TensorArray dtype is ${this.dtype} but gather requested dtype ${r}`);if(t)t=t.slice(0,this.size());else{t=[];for(let r=0;r=this.maxSize)throw Error(`Max index must be < array size (${a} vs. ${this.maxSize})`);this.writeMany(t,iw(r,0))}split(t,r){if(r.dtype!==this.dtype)throw Error(`TensorArray dtype is ${this.dtype} but tensor has dtype ${r.dtype}`);let a=0,n=t.map(t=>a+=t);if(a!==r.shape[0])throw Error(`Expected sum of lengths to be equal to tensor.shape[0], but sum of lengths is ${a}, and tensor's shape is: ${r.shape}`);if(!this.dynamicSize&&t.length!==this.maxSize)throw Error(`TensorArray's size is not equal to the size of lengths (${this.maxSize} vs. 
${t.length}), and the TensorArray is not marked as dynamically resizeable`);let s=0===a?0:r.size/a,i=[];globals_tidy(()=>{r=a6(r,[1,a,s]);for(let a=0;a{if(a!==t.dtype)throw Error(`Invalid data types; op elements ${a}, but list elements ${t.dtype}`);assertShapesMatchAllowUndefinedSize(r,t.shape,"TensorList shape mismatch: "),keep(t)}),this.idTensor=scalar_scalar(0),this.maxNumElements=n,keep(this.idTensor)}copy(){return new TensorList([...this.tensors],this.elementShape,this.elementDtype)}clearAndClose(t){this.tensors.forEach(r=>{null!=t&&t.has(r.id)||r.dispose()}),this.tensors.length=0,this.idTensor.dispose()}size(){return this.tensors.length}stack(t,r,a=-1){if(r!==this.elementDtype)throw Error(`Invalid data types; op elements ${r}, but list elements ${this.elementDtype}`);if(-1!==a&&this.tensors.length!==a)throw Error(`Operation expected a list with ${a} elements but got a list with ${this.tensors.length} elements.`);assertShapesMatchAllowUndefinedSize(t,this.elementShape,"TensorList shape mismatch: ");let n=inferElementShape(this.elementShape,this.tensors,t);return globals_tidy(()=>ig(this.tensors.map(t=>a6(t,n)),0))}popBack(t,r){if(r!==this.elementDtype)throw Error(`Invalid data types; op elements ${r}, but list elements ${this.elementDtype}`);if(0===this.size())throw Error("Trying to pop from an empty list.");let a=inferElementShape(this.elementShape,this.tensors,t),n=this.tensors.pop();return n.kept=!1,assertShapesMatchAllowUndefinedSize(n.shape,t,"TensorList shape mismatch: "),a6(n,a)}pushBack(t){if(t.dtype!==this.elementDtype)throw Error(`Invalid data types; op elements ${t.dtype}, but list elements ${this.elementDtype}`);if(assertShapesMatchAllowUndefinedSize(t.shape,this.elementShape,"TensorList shape mismatch: "),this.maxNumElements===this.size())throw Error("Trying to push element into a full list.");keep(t),this.tensors.push(t)}resize(t){if(t<0)throw Error(`TensorListResize expects size to be non-negative. 
Got: ${t}`);if(-1!==this.maxNumElements&&t>this.maxNumElements)throw Error(`TensorListResize input size ${t} is greater maxNumElement ${this.maxNumElements}.`);let r=new TensorList([],this.elementShape,this.elementDtype,this.maxNumElements);r.tensors.length=t;for(let a=0;athis.tensors.length)throw Error(`Trying to access element ${t} in a list with ${this.tensors.length} elements.`);if(null==this.tensors[t])throw Error(`element at index ${t} is null.`);assertShapesMatchAllowUndefinedSize(this.tensors[t].shape,r,"TensorList shape mismatch: ");let n=inferElementShape(this.elementShape,this.tensors,r);return a6(this.tensors[t],n)}setItem(t,r){if(r.dtype!==this.elementDtype)throw Error(`Invalid data types; op elements ${r.dtype}, but list elements ${this.elementDtype}`);if(t<0||-1!==this.maxNumElements&&t>=this.maxNumElements)throw Error(`Trying to set element ${t} in a list with max ${this.maxNumElements} elements.`);assertShapesMatchAllowUndefinedSize(this.elementShape,r.shape,"TensorList shape mismatch: "),keep(r),null!=this.tensors[t]&&(this.tensors[t].kept=!1),this.tensors[t]=r}gather(t,r,a){if(r!==this.elementDtype)throw Error(`Invalid data types; op elements ${r}, but list elements ${this.elementDtype}`);assertShapesMatchAllowUndefinedSize(this.elementShape,a,"TensorList shape mismatch: "),t=t.slice(0,this.size());let n=inferElementShape(this.elementShape,this.tensors,a);return 0===t.length?tensor([],[0].concat(n)):globals_tidy(()=>ig(t.map(t=>a6(this.tensors[t],n)),0))}concat(t,r){if(t&&t!==this.elementDtype)throw Error(`TensorList dtype is ${this.elementDtype} but concat requested dtype ${t}`);assertShapesMatchAllowUndefinedSize(this.elementShape,r,"TensorList shape mismatch: ");let a=inferElementShape(this.elementShape,this.tensors,r);return 0===this.size()?tensor([],[0].concat(a)):globals_tidy(()=>a7(this.tensors.map(t=>a6(t,a)),0))}};function fromTensor(t,r,a){let n=t.dtype;if(t.shape.length<1)throw Error(`Tensor must be at least a vector, but saw shape: 
${t.shape}`);if(t.dtype!==a)throw Error(`Invalid data types; op elements ${t.dtype}, but list elements ${a}`);return assertShapesMatchAllowUndefinedSize(t.shape.slice(1),r,"TensorList shape mismatch: "),new TensorList(iw(t),r,n)}function reserve(t,r,a,n){return new TensorList([],t,r,n)}function scatter(t,r,a,n){if(r.length!==t.shape[0])throw Error(`Expected len(indices) == tensor.shape[0], but saw: ${r.length} vs. ${t.shape[0]}`);let s=Math.max(...r);if(null!=n&&-1!==n&&s>=n)throw Error(`Max index must be < array size (${s} vs. ${n})`);let i=new TensorList([],a,t.dtype,n),o=iw(t,0);return r.forEach((t,r)=>{i.setItem(t,o[r])}),i}function tensor_list_split(t,r,a){let n=0,s=r.map(t=>n+=t);if(n!==t.shape[0])throw Error(`Expected sum of lengths to be equal to tensor.shape[0], but sum of lengths is ${n}, and tensor's shape is: ${t.shape}`);let i=mergeElementShape(t.shape.slice(1),a),o=0===n?0:t.size/n,l=globals_tidy(()=>{let a=[];t=a6(t,[1,n,o]);for(let n=0;n{switch(t.op){case"If":case"StatelessIf":{let n=getParamValue("thenBranch",t,r,a),s=getParamValue("elseBranch",t,r,a),i=getParamValue("cond",t,r,a),o=getParamValue("args",t,r,a);if((await i.data())[0])return a.functionMap[n].executeFunctionAsync(o,a.tensorArrayMap,a.tensorListMap);return a.functionMap[s].executeFunctionAsync(o,a.tensorArrayMap,a.tensorListMap)}case"While":case"StatelessWhile":{let n=getParamValue("body",t,r,a),s=getParamValue("cond",t,r,a),i=getParamValue("args",t,r,a),o=await a.functionMap[s].executeFunctionAsync(i,a.tensorArrayMap,a.tensorListMap),l=i.map(t=>t.id),u=await o[0].data();o.forEach(t=>{t.kept||-1!==l.indexOf(t.id)||t.dispose()});let p=i;for(;u[0];){let t=p,r=(p=await a.functionMap[n].executeFunctionAsync(p,a.tensorArrayMap,a.tensorListMap)).map(t=>t.id);t.forEach(t=>{t.kept||-1!==l.indexOf(t.id)||-1!==r.indexOf(t.id)||t.dispose()});let i=await a.functionMap[s].executeFunctionAsync(p,a.tensorArrayMap,a.tensorListMap);u=await 
i[0].data(),i.forEach(t=>{t.kept||-1!==l.indexOf(t.id)||-1!==r.indexOf(t.id)||t.dispose()})}return p}case"LoopCond":return[cloneTensor(getParamValue("pred",t,r,a))];case"Switch":{let n=getParamValue("pred",t,r,a),s=getParamValue("data",t,r,a);return s.kept||(s=cloneTensor(s)),(await n.data())[0]?[void 0,s]:[s,void 0]}case"Merge":{let n=t.inputNames.find(t=>void 0!==getTensor(t,r,a));if(n)return[cloneTensor(getTensor(n,r,a))];return}case"Enter":{let n=getParamValue("frameName",t,r,a),s=getParamValue("tensor",t,r,a);return a.enterFrame(n),[cloneTensor(s)]}case"Exit":{let n=getParamValue("tensor",t,r,a);return a.exitFrame(),[cloneTensor(n)]}case"NextIteration":{let n=getParamValue("tensor",t,r,a);return a.nextIteration(),[cloneTensor(n)]}case"TensorArrayV3":{let n=getParamValue("size",t,r,a),s=getParamValue("dtype",t,r,a),i=getParamValue("elementShape",t,r,a),o=getParamValue("dynamicSize",t,r,a),l=getParamValue("clearAfterRead",t,r,a),u=getParamValue("identicalElementShapes",t,r,a),p=new TensorArray(getParamValue("name",t,r,a),s,n,i,u,o,l);return a.addTensorArray(p),[p.idTensor,scalar_scalar(1)]}case"TensorArrayWriteV3":{let n=getParamValue("tensorArrayId",t,r,a),s=getParamValue("index",t,r,a),i=getParamValue("tensor",t,r,a),o=a.getTensorArray(n.id);return o.write(s,i),[o.idTensor]}case"TensorArrayReadV3":{let n=getParamValue("tensorArrayId",t,r,a),s=getParamValue("index",t,r,a);return[a.getTensorArray(n.id).read(s)]}case"TensorArrayGatherV3":{let n=getParamValue("tensorArrayId",t,r,a),s=getParamValue("indices",t,r,a),i=getParamValue("dtype",t,r,a);return[a.getTensorArray(n.id).gather(s,i)]}case"TensorArrayScatterV3":{let n=getParamValue("tensorArrayId",t,r,a),s=getParamValue("indices",t,r,a),i=getParamValue("tensor",t,r,a),o=a.getTensorArray(n.id);return o.scatter(s,i),[o.idTensor]}case"TensorArrayConcatV3":{let n=getParamValue("tensorArrayId",t,r,a),s=a.getTensorArray(n.id),i=getParamValue("dtype",t,r,a);return[s.concat(i)]}case"TensorArraySplitV3":{let 
n=getParamValue("tensorArrayId",t,r,a),s=getParamValue("tensor",t,r,a),i=getParamValue("lengths",t,r,a),o=a.getTensorArray(n.id);return o.split(i,s),[o.idTensor]}case"TensorArraySizeV3":{let n=getParamValue("tensorArrayId",t,r,a);return[scalar_scalar(a.getTensorArray(n.id).size(),"int32")]}case"TensorArrayCloseV3":{let n=getParamValue("tensorArrayId",t,r,a),s=a.getTensorArray(n.id);return s.clearAndClose(),[s.idTensor]}case"TensorListSetItem":{let n=getParamValue("tensorListId",t,r,a),s=getParamValue("index",t,r,a),i=getParamValue("tensor",t,r,a),o=a.getTensorList(n.id);return o.setItem(s,i),[o.idTensor]}case"TensorListGetItem":{let n=getParamValue("tensorListId",t,r,a),s=getParamValue("index",t,r,a),i=getParamValue("elementShape",t,r,a),o=getParamValue("elementDType",t,r,a);return[a.getTensorList(n.id).getItem(s,i,o)]}case"TensorListScatterV2":case"TensorListScatter":{let n=getParamValue("indices",t,r,a),s=scatter(getParamValue("tensor",t,r,a),n,getParamValue("elementShape",t,r,a),getParamValue("numElements",t,r,a));return a.addTensorList(s),[s.idTensor]}case"TensorListReserve":case"EmptyTensorList":{let n=getParamValue("elementShape",t,r,a),s=getParamValue("elementDType",t,r,a),i=getParamValue("TensorListReserve"===t.op?"numElements":"maxNumElements",t,r,a),o="TensorListReserve"===t.op?-1:i,l=reserve(n,s,i,o);return a.addTensorList(l),[l.idTensor]}case"TensorListGather":{let n=getParamValue("tensorListId",t,r,a),s=getParamValue("indices",t,r,a),i=getParamValue("elementShape",t,r,a),o=getParamValue("elementDType",t,r,a);return[a.getTensorList(n.id).gather(s,o,i)]}case"TensorListStack":{let n=getParamValue("tensorListId",t,r,a),s=getParamValue("elementShape",t,r,a),i=getParamValue("elementDType",t,r,a),o=getParamValue("numElements",t,r,a);return[a.getTensorList(n.id).stack(s,i,o)]}case"TensorListFromTensor":{let n=fromTensor(getParamValue("tensor",t,r,a),getParamValue("elementShape",t,r,a),getParamValue("elementDType",t,r,a));return 
a.addTensorList(n),[n.idTensor]}case"TensorListConcat":case"TensorListConcatV2":{let n=getParamValue("tensorListId",t,r,a),s=a.getTensorList(n.id),i=getParamValue("dtype",t,r,a),o=getParamValue("elementShape",t,r,a);return[s.concat(i,o)]}case"TensorListPushBack":{let n=getParamValue("tensorListId",t,r,a),s=getParamValue("tensor",t,r,a),i=a.getTensorList(n.id);return i.pushBack(s),[i.idTensor]}case"TensorListPopBack":{let n=getParamValue("tensorListId",t,r,a),s=getParamValue("elementShape",t,r,a),i=getParamValue("elementDType",t,r,a);return[a.getTensorList(n.id).popBack(s,i)]}case"TensorListSplit":{let n=getParamValue("tensor",t,r,a),s=getParamValue("elementShape",t,r,a),i=tensor_list_split(n,getParamValue("lengths",t,r,a),s);return a.addTensorList(i),[i.idTensor]}case"TensorListLength":{let n=getParamValue("tensorListId",t,r,a);return[scalar_scalar(a.getTensorList(n.id).size(),"int32")]}case"TensorListResize":{let n=getParamValue("tensorListId",t,r,a),s=getParamValue("size",t,r,a),i=a.getTensorList(n.id).resize(s);return a.addTensorList(i),[i.idTensor]}default:throw TypeError(`Node type ${t.op} is not implemented`)}};function fusedConvAndDepthWiseParams(t,r,a){let[n,s]=getParamValue("fusedOps",t,r,a),i="biasadd"===n,o="prelu"===s,l=getParamValue("numArgs",t,r,a);if(i){if(o&&2!==l)throw Error("FusedConv2d and DepthwiseConv2d with BiasAdd and Prelu must have two extra arguments: bias and alpha.");if(!o&&i&&1!==l)throw Error("FusedConv2d and DepthwiseConv2d with BiasAdd must have one extra argument: bias.")}if("fusedbatchnorm"===n)throw Error("FusedConv2d and DepthwiseConv2d with FusedBatchNorm is not supported");let u=getParamValue("strides",t,r,a),p=getPadding(t,r,a),m=getParamValue("dataFormat",t,r,a).toUpperCase(),y=getParamValue("dilations",t,r,a),[_,w]=getParamValue("args",t,r,a);return i||(w=_,_=void 0),{stride:u,pad:p,dataFormat:m,dilations:y,biasArg:_,preluArg:w,activationFunc:s,leakyreluAlpha:getParamValue("leakyreluAlpha",t,r,a)}}function 
nmsParams(t,r,a){let n=getParamValue("boxes",t,r,a),s=getParamValue("scores",t,r,a),i=getParamValue("maxOutputSize",t,r,a),o=getParamValue("iouThreshold",t,r,a);return{boxes:n,scores:s,maxOutputSize:i,iouThreshold:o,scoreThreshold:getParamValue("scoreThreshold",t,r,a),softNmsSigma:getParamValue("softNmsSigma",t,r,a)}}let dynamic_executor_executeOp=async(t,r,a,n,s=eM)=>{switch(t.op){case"NonMaxSuppressionV5":{let{boxes:n,scores:i,maxOutputSize:o,iouThreshold:l,scoreThreshold:u,softNmsSigma:p}=nmsParams(t,r,a),m=await s.image.nonMaxSuppressionWithScoreAsync(n,i,o,l,u,p);return[m.selectedIndices,m.selectedScores]}case"NonMaxSuppressionV4":{let{boxes:n,scores:i,maxOutputSize:o,iouThreshold:l,scoreThreshold:u}=nmsParams(t,r,a),p=getParamValue("padToMaxOutputSize",t,r,a),m=await s.image.nonMaxSuppressionPaddedAsync(n,i,o,l,u,p);return[m.selectedIndices,m.validOutputs]}case"NonMaxSuppressionV3":case"NonMaxSuppressionV2":{let{boxes:n,scores:i,maxOutputSize:o,iouThreshold:l,scoreThreshold:u}=nmsParams(t,r,a);return[await s.image.nonMaxSuppressionAsync(n,i,o,l,u)]}case"Where":{let n=s.cast(getParamValue("condition",t,r,a),"bool"),i=[await s.whereAsync(n)];return n.dispose(),i}case"ListDiff":return s.setdiff1dAsync(getParamValue("x",t,r,a),getParamValue("y",t,r,a));default:throw TypeError(`Node type ${t.op} is not implemented`)}};let HashTable=class HashTable{get id(){return this.handle.id}constructor(t,r){this.keyDType=t,this.valueDType=r,this.handle=scalar_scalar(0),this.tensorMap=new Map,keep(this.handle)}clearAndClose(){this.tensorMap.forEach(t=>t.dispose()),this.tensorMap.clear(),this.handle.dispose()}size(){return this.tensorMap.size}tensorSize(){return scalar_scalar(this.size(),"int32")}async import(t,r){this.checkKeyAndValueTensor(t,r);let a=await t.data();return this.tensorMap.forEach(t=>t.dispose()),this.tensorMap.clear(),globals_tidy(()=>{let t=iw(r),n=a.length,s=t.length;assert(n===s,()=>`The number of elements doesn't match, keys has ${n} elements, the values has 
${s} elements.`);for(let r=0;r{let t=[];for(let n=0;n{switch(t.op){case"HashTable":case"HashTableV2":{let s=n.getHashTableHandleByName(t.name);if(null!=s)return[s];{let s=new HashTable(getParamValue("keyDType",t,r,a),getParamValue("valueDType",t,r,a));return n.addHashTable(t.name,s),[s.handle]}}case"InitializeTable":case"InitializeTableV2":case"LookupTableImport":case"LookupTableImportV2":{let s=getParamValue("tableHandle",t,r,a,n),i=getParamValue("keys",t,r,a),o=getParamValue("values",t,r,a),l=n.getHashTableById(s.id);return[await l.import(i,o)]}case"LookupTableFind":case"LookupTableFindV2":{let s=getParamValue("tableHandle",t,r,a,n),i=getParamValue("keys",t,r,a),o=getParamValue("defaultValue",t,r,a),l=n.getHashTableById(s.id);return[await l.find(i,o)]}case"LookupTableSize":case"LookupTableSizeV2":{let s=getParamValue("tableHandle",t,r,a,n);return[n.getHashTableById(s.id).tensorSize()]}default:throw TypeError(`Node type ${t.op} is not implemented`)}};function operation_executor_executeOp(t,r,a,n,s=globals_tidy){let i=((t,r,a)=>{switch(t.category){case"arithmetic":return 
s(()=>((t,r,a,n=eM)=>{switch(t.op){case"BiasAdd":case"AddV2":case"Add":return[n.add(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"AddN":return[n.addN(getParamValue("tensors",t,r,a))];case"FloorMod":case"Mod":return[n.mod(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"Mul":return[n.mul(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"RealDiv":case"Div":return[n.div(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"DivNoNan":return[n.divNoNan(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"FloorDiv":return[n.floorDiv(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"Sub":return[n.sub(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"Minimum":return[n.minimum(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"Maximum":return[n.maximum(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"Pow":return[n.pow(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"SquaredDifference":return[n.squaredDifference(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];default:throw TypeError(`Node type ${t.op} is not implemented`)}})(t,r,a));case"basic_math":return 
s(()=>((t,r,a,n=eM)=>{switch(t.op){case"Abs":case"ComplexAbs":return[n.abs(getParamValue("x",t,r,a))];case"Acos":return[n.acos(getParamValue("x",t,r,a))];case"Acosh":return[n.acosh(getParamValue("x",t,r,a))];case"Asin":return[n.asin(getParamValue("x",t,r,a))];case"Asinh":return[n.asinh(getParamValue("x",t,r,a))];case"Atan":return[n.atan(getParamValue("x",t,r,a))];case"Atan2":return[n.atan2(getParamValue("x",t,r,a),getParamValue("y",t,r,a))];case"Atanh":return[n.atanh(getParamValue("x",t,r,a))];case"Ceil":return[n.ceil(getParamValue("x",t,r,a))];case"Complex":return[n.complex(getParamValue("real",t,r,a),getParamValue("imag",t,r,a))];case"Cos":return[n.cos(getParamValue("x",t,r,a))];case"Cosh":return[n.cosh(getParamValue("x",t,r,a))];case"Elu":return[n.elu(getParamValue("x",t,r,a))];case"Erf":return[n.erf(getParamValue("x",t,r,a))];case"Exp":return[n.exp(getParamValue("x",t,r,a))];case"Expm1":return[n.expm1(getParamValue("x",t,r,a))];case"Floor":return[n.floor(getParamValue("x",t,r,a))];case"Log":return[n.log(getParamValue("x",t,r,a))];case"Log1p":return[n.log1p(getParamValue("x",t,r,a))];case"Imag":return[n.imag(getParamValue("x",t,r,a))];case"Neg":return[n.neg(getParamValue("x",t,r,a))];case"Reciprocal":return[n.reciprocal(getParamValue("x",t,r,a))];case"Real":return[n.real(getParamValue("x",t,r,a))];case"Relu":return[n.relu(getParamValue("x",t,r,a))];case"Round":return[n.round(getParamValue("x",t,r,a))];case"Selu":return[n.selu(getParamValue("x",t,r,a))];case"Sigmoid":return[n.sigmoid(getParamValue("x",t,r,a))];case"Sin":return[n.sin(getParamValue("x",t,r,a))];case"Sign":return[n.sign(getParamValue("x",t,r,a))];case"Sinh":return[n.sinh(getParamValue("x",t,r,a))];case"Softplus":return[n.softplus(getParamValue("x",t,r,a))];case"Sqrt":return[n.sqrt(getParamValue("x",t,r,a))];case"Square":return[n.square(getParamValue("x",t,r,a))];case"Tanh":return[n.tanh(getParamValue("x",t,r,a))];case"Tan":return[n.tan(getParamValue("x",t,r,a))];case"ClipByValue":return[n.clipByValue
(getParamValue("x",t,r,a),getParamValue("clipValueMin",t,r,a),getParamValue("clipValueMax",t,r,a))];case"Relu6":return[n.relu6(getParamValue("x",t,r,a))];case"Rsqrt":return[n.rsqrt(getTensor(t.inputNames[0],r,a))];case"LeakyRelu":return[n.leakyRelu(getParamValue("x",t,r,a),getParamValue("alpha",t,r,a))];case"Prelu":return[n.prelu(getParamValue("x",t,r,a),getParamValue("alpha",t,r,a))];case"IsNan":return[n.isNaN(getTensor(t.inputNames[0],r,a))];case"IsInf":return[n.isInf(getTensor(t.inputNames[0],r,a))];case"IsFinite":return[n.isFinite(getTensor(t.inputNames[0],r,a))];default:throw TypeError(`Node type ${t.op} is not implemented`)}})(t,r,a));case"control":return control_executor_executeOp(t,r,a);case"convolution":return s(()=>((t,r,a,n=eM)=>{switch(t.op){case"Conv1D":{let s=getParamValue("stride",t,r,a),i=getParamValue("pad",t,r,a),o=getParamValue("dataFormat",t,r,a).toUpperCase(),l=getParamValue("dilation",t,r,a);return[n.conv1d(getParamValue("x",t,r,a),getParamValue("filter",t,r,a),s,i,o,l)]}case"Conv2D":{let 
s=getParamValue("strides",t,r,a),i=getPadding(t,r,a),o=getParamValue("dataFormat",t,r,a).toUpperCase(),l=getParamValue("dilations",t,r,a);return[n.conv2d(getParamValue("x",t,r,a),getParamValue("filter",t,r,a),[s[1],s[2]],i,o,[l[1],l[2]])]}case"_FusedConv2D":{let{stride:s,pad:i,dataFormat:o,dilations:l,biasArg:u,preluArg:p,activationFunc:m,leakyreluAlpha:y}=fusedConvAndDepthWiseParams(t,r,a);return[n.fused.conv2d({x:getParamValue("x",t,r,a),filter:getParamValue("filter",t,r,a),strides:[s[1],s[2]],pad:i,dataFormat:o,dilations:[l[1],l[2]],bias:u,activation:m,preluActivationWeights:p,leakyreluAlpha:y})]}case"FusedDepthwiseConv2dNative":{let{stride:s,pad:i,dataFormat:o,dilations:l,biasArg:u,preluArg:p,activationFunc:m,leakyreluAlpha:y}=fusedConvAndDepthWiseParams(t,r,a);return[n.fused.depthwiseConv2d({x:getParamValue("x",t,r,a),filter:getParamValue("filter",t,r,a),strides:[s[1],s[2]],pad:i,dataFormat:o,dilations:[l[1],l[2]],bias:u,activation:m,preluActivationWeights:p,leakyreluAlpha:y})]}case"Conv2DBackpropInput":case"Conv2dTranspose":{let s=getParamValue("outputShape",t,r,a),i=getParamValue("strides",t,r,a),o=getPadding(t,r,a);return[n.conv2dTranspose(getParamValue("x",t,r,a),getParamValue("filter",t,r,a),s,[i[1],i[2]],o)]}case"DepthwiseConv2dNative":case"DepthwiseConv2d":{let s=getParamValue("strides",t,r,a),i=getPadding(t,r,a),o=getParamValue("dilations",t,r,a),l=getParamValue("dataFormat",t,r,a).toUpperCase();return[n.depthwiseConv2d(getParamValue("input",t,r,a),getParamValue("filter",t,r,a),[s[1],s[2]],i,l,[o[1],o[2]])]}case"Conv3D":{let s=getParamValue("strides",t,r,a),i=getParamValue("pad",t,r,a),o=getParamValue("dataFormat",t,r,a).toUpperCase(),l=getParamValue("dilations",t,r,a);return[n.conv3d(getParamValue("x",t,r,a),getParamValue("filter",t,r,a),[s[1],s[2],s[3]],i,o,[l[1],l[2],l[3]])]}case"AvgPool":{let 
s=getParamValue("strides",t,r,a),i=getParamValue("pad",t,r,a),o=getParamValue("kernelSize",t,r,a);return[n.avgPool(getParamValue("x",t,r,a),[o[1],o[2]],[s[1],s[2]],i)]}case"MaxPool":{let s=getParamValue("strides",t,r,a),i=getParamValue("pad",t,r,a),o=getParamValue("kernelSize",t,r,a);return[n.maxPool(getParamValue("x",t,r,a),[o[1],o[2]],[s[1],s[2]],i)]}case"MaxPoolWithArgmax":{let s=getParamValue("strides",t,r,a),i=getParamValue("pad",t,r,a),o=getParamValue("kernelSize",t,r,a),l=getParamValue("includeBatchInIndex",t,r,a),{result:u,indexes:p}=n.maxPoolWithArgmax(getParamValue("x",t,r,a),[o[1],o[2]],[s[1],s[2]],i,l);return[u,p]}case"AvgPool3D":{let s=getParamValue("strides",t,r,a),i=getParamValue("pad",t,r,a),o=getParamValue("kernelSize",t,r,a);return[n.avgPool3d(getParamValue("x",t,r,a),[o[1],o[2],o[3]],[s[1],s[2],s[3]],i)]}case"MaxPool3D":{let s=getParamValue("strides",t,r,a),i=getParamValue("pad",t,r,a),o=getParamValue("kernelSize",t,r,a);return[n.maxPool3d(getParamValue("x",t,r,a),[o[1],o[2],o[3]],[s[1],s[2],s[3]],i)]}case"Dilation2D":{let s=getParamValue("strides",t,r,a),i=getParamValue("pad",t,r,a),o=getParamValue("dilations",t,r,a),l=s[1],u=s[2],p=o[1],m=o[2];return[n.dilation2d(getParamValue("x",t,r,a),getParamValue("filter",t,r,a),[l,u],i,[p,m],"NHWC")]}default:throw TypeError(`Node type ${t.op} is not implemented`)}})(t,r,a));case"creation":return s(()=>((t,r,a,n=eM)=>{switch(t.op){case"Fill":{let s=getParamValue("shape",t,r,a),i=getParamValue("dtype",t,r,a),o=getParamValue("value",t,r,a);return[n.fill(s,o,i)]}case"LinSpace":{let s=getParamValue("start",t,r,a),i=getParamValue("stop",t,r,a),o=getParamValue("num",t,r,a);return[n.linspace(s,i,o)]}case"Multinomial":{let s=getParamValue("logits",t,r,a),i=getParamValue("numSamples",t,r,a),o=getParamValue("seed",t,r,a);return[n.multinomial(s,i,o)]}case"OneHot":{let 
s=getParamValue("indices",t,r,a),i=getParamValue("depth",t,r,a),o=getParamValue("onValue",t,r,a),l=getParamValue("offValue",t,r,a),u=getParamValue("dtype",t,r,a);return[n.oneHot(s,i,o,l,u)]}case"Ones":return[n.ones(getParamValue("shape",t,r,a),getParamValue("dtype",t,r,a))];case"OnesLike":return[n.onesLike(getParamValue("x",t,r,a))];case"RandomStandardNormal":return[n.randomStandardNormal(getParamValue("shape",t,r,a),getParamValue("dtype",t,r,a),getParamValue("seed",t,r,a))];case"RandomUniform":return[n.randomUniform(getParamValue("shape",t,r,a),getParamValue("minval",t,r,a),getParamValue("maxval",t,r,a),getParamValue("dtype",t,r,a))];case"RandomUniformInt":return[n.randomUniformInt(getParamValue("shape",t,r,a),getParamValue("minval",t,r,a),getParamValue("maxval",t,r,a),getParamValue("seed",t,r,a))];case"Range":{let s=getParamValue("start",t,r,a),i=getParamValue("stop",t,r,a),o=getParamValue("step",t,r,a);return[n.range(s,i,o,getParamValue("dtype",t,r,a))]}case"TruncatedNormal":{let s=getParamValue("shape",t,r,a),i=getParamValue("mean",t,r,a),o=getParamValue("stdDev",t,r,a),l=getParamValue("seed",t,r,a);return[n.truncatedNormal(s,i,o,getParamValue("dtype",t,r,a),l)]}case"Zeros":return[n.zeros(getParamValue("shape",t,r,a),getParamValue("dtype",t,r,a))];case"ZerosLike":return[n.zerosLike(getParamValue("x",t,r,a))];default:throw TypeError(`Node type ${t.op} is not implemented`)}})(t,r,a));case"dynamic":return dynamic_executor_executeOp(t,r,a);case"evaluation":return s(()=>((t,r,a,n=eM)=>{switch(t.op){case"LowerBound":{let s=getParamValue("sortedSequence",t,r,a),i=getParamValue("values",t,r,a);return[n.lowerBound(s,i)]}case"TopKV2":{let s=getParamValue("x",t,r,a),i=getParamValue("k",t,r,a),o=getParamValue("sorted",t,r,a),l=n.topk(s,i,o);return[l.values,l.indices]}case"UpperBound":{let s=getParamValue("sortedSequence",t,r,a),i=getParamValue("values",t,r,a);return[n.upperBound(s,i)]}case"Unique":{let 
s=getParamValue("x",t,r,a),i=n.unique(s);return[i.values,i.indices]}case"UniqueV2":{let s=getParamValue("x",t,r,a),i=getParamValue("axis",t,r,a),o=n.unique(s,i);return[o.values,o.indices]}default:throw TypeError(`Node type ${t.op} is not implemented`)}})(t,r,a));case"image":return s(()=>((t,r,a,n=eM)=>{switch(t.op){case"ResizeBilinear":{let s=getParamValue("images",t,r,a),i=getParamValue("size",t,r,a),o=getParamValue("alignCorners",t,r,a),l=getParamValue("halfPixelCenters",t,r,a);return[n.image.resizeBilinear(s,[i[0],i[1]],o,l)]}case"ResizeNearestNeighbor":{let s=getParamValue("images",t,r,a),i=getParamValue("size",t,r,a),o=getParamValue("alignCorners",t,r,a),l=getParamValue("halfPixelCenters",t,r,a);return[n.image.resizeNearestNeighbor(s,[i[0],i[1]],o,l)]}case"CropAndResize":{let s=getParamValue("image",t,r,a),i=getParamValue("boxes",t,r,a),o=getParamValue("boxInd",t,r,a),l=getParamValue("cropSize",t,r,a),u=getParamValue("method",t,r,a),p=getParamValue("extrapolationValue",t,r,a);return[n.image.cropAndResize(s,i,o,l,u,p)]}case"ImageProjectiveTransformV3":{let s=getParamValue("images",t,r,a),i=getParamValue("transforms",t,r,a),o=getParamValue("outputShape",t,r,a),l=getParamValue("fillValue",t,r,a),u=getParamValue("interpolation",t,r,a),p=getParamValue("fillMode",t,r,a);return[n.image.transform(s,i,u.toLowerCase(),p.toLowerCase(),l,o)]}default:throw TypeError(`Node type ${t.op} is not implemented`)}})(t,r,a));case"graph":return s(()=>((t,r,a,n=eM)=>{switch(t.op){case"Const":return r[t.name];case"PlaceholderWithDefault":let s=getParamValue("default",t,r,a);return[getTensor(t.name,r,a)||s];case"Placeholder":return[getTensor(t.name,r,a)];case"Identity":case"StopGradient":case"FakeQuantWithMinMaxVars":case"Snapshot":return[cloneTensor(getParamValue("x",t,r,a))];case"IdentityN":return getParamValue("x",t,r,a).map(t=>cloneTensor(t));case"Shape":return[n.tensor1d(getParamValue("x",t,r,a).shape,"int32")];case"ShapeN":return 
getParamValue("x",t,r,a).map(t=>n.tensor1d(t.shape));case"Size":return[n.scalar(getParamValue("x",t,r,a).size,"int32")];case"Rank":return[n.scalar(getParamValue("x",t,r,a).rank,"int32")];case"NoOp":return[n.scalar(1)];case"Print":let i=getParamValue("x",t,r,a),o=getParamValue("data",t,r,a),l=getParamValue("message",t,r,a),u=getParamValue("summarize",t,r,a);console.warn("The graph has a tf.print() operation,usually used for debugging, which slows down performance."),console.log(l);for(let t=0;t((t,r,a,n=eM)=>{switch(t.op){case"Equal":return[n.equal(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"NotEqual":return[n.notEqual(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"Greater":return[n.greater(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"GreaterEqual":return[n.greaterEqual(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"Less":return[n.less(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"LessEqual":return[n.lessEqual(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"LogicalAnd":return[n.logicalAnd(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"LogicalNot":return[n.logicalNot(getParamValue("a",t,r,a))];case"LogicalOr":return[n.logicalOr(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"Select":case"SelectV2":return[n.where(getParamValue("condition",t,r,a),getParamValue("a",t,r,a),getParamValue("b",t,r,a))];case"BitwiseAnd":return[n.bitwiseAnd(getParamValue("a",t,r,a),getParamValue("b",t,r,a))];default:throw TypeError(`Node type ${t.op} is not implemented`)}})(t,r,a));case"matrices":return 
s(()=>((t,r,a,n=eM)=>{switch(t.op){case"BatchMatMul":case"BatchMatMulV2":case"MatMul":return[n.matMul(getParamValue("a",t,r,a),getParamValue("b",t,r,a),getParamValue("transposeA",t,r,a),getParamValue("transposeB",t,r,a))];case"Einsum":return[n.einsum(getParamValue("equation",t,r,a),...getParamValue("tensors",t,r,a))];case"Transpose":return[n.transpose(getParamValue("x",t,r,a),getParamValue("perm",t,r,a))];case"_FusedMatMul":let[s,i]=getParamValue("fusedOps",t,r,a),o="prelu"===i,l=getParamValue("numArgs",t,r,a),u=getParamValue("leakyreluAlpha",t,r,a);if("biasadd"===s){if(o&&2!==l)throw Error("Fused MatMul with BiasAdd and Prelu must have two extra arguments: bias and alpha.");if(!o&&1!==l)throw Error("Fused MatMul with BiasAdd must have one extra argument: bias.")}let[p,m]=getParamValue("args",t,r,a);return[n.fused.matMul({a:getParamValue("a",t,r,a),b:getParamValue("b",t,r,a),transposeA:getParamValue("transposeA",t,r,a),transposeB:getParamValue("transposeB",t,r,a),bias:p,activation:i,preluActivationWeights:m,leakyreluAlpha:u})];case"MatrixBandPart":return[n.linalg.bandPart(getParamValue("a",t,r,a),getParamValue("numLower",t,r,a),getParamValue("numUpper",t,r,a))];default:throw TypeError(`Node type ${t.op} is not implemented`)}})(t,r,a));case"normalization":return 
s(()=>((t,r,a,n=eM)=>{switch(t.op){case"EuclideanNorm":return[n.euclideanNorm(getParamValue("x",t,r,a),getParamValue("axis",t,r,a),getParamValue("keepDims",t,r,a))];case"FusedBatchNorm":case"FusedBatchNormV2":case"FusedBatchNormV3":return[n.batchNorm(getParamValue("x",t,r,a),getParamValue("mean",t,r,a),getParamValue("variance",t,r,a),getParamValue("offset",t,r,a),getParamValue("scale",t,r,a),getParamValue("epsilon",t,r,a))];case"LRN":return[n.localResponseNormalization(getParamValue("x",t,r,a),getParamValue("radius",t,r,a),getParamValue("bias",t,r,a),getParamValue("alpha",t,r,a),getParamValue("beta",t,r,a))];case"Softmax":return[n.softmax(getParamValue("x",t,r,a))];case"LogSoftmax":return[n.logSoftmax(getParamValue("x",t,r,a))];default:throw TypeError(`Node type ${t.op} is not implemented`)}})(t,r,a));case"ragged":return s(()=>((t,r,a,n=eM)=>{switch(t.op){case"RaggedGather":{let{outputNestedSplits:s,outputDenseValues:i}=n.raggedGather(getParamValue("paramsNestedSplits",t,r,a),getParamValue("paramsDenseValues",t,r,a),getParamValue("indices",t,r,a),getParamValue("outputRaggedRank",t,r,a));return s.concat(i)}case"RaggedRange":{let{rtNestedSplits:s,rtDenseValues:i}=n.raggedRange(getParamValue("starts",t,r,a),getParamValue("limits",t,r,a),getParamValue("splits",t,r,a));return[s,i]}case"RaggedTensorToTensor":return[n.raggedTensorToTensor(getParamValue("shape",t,r,a),getParamValue("values",t,r,a),getParamValue("defaultValue",t,r,a),getParamValue("rowPartitionTensors",t,r,a),getParamValue("rowPartitionTypes",t,r,a))];default:throw TypeError(`Node type ${t.op} is not implemented`)}})(t,r,a));case"reduction":return s(()=>((t,r,a,n=eM)=>{switch(t.op){case"Max":{let s=getParamValue("axis",t,r,a),i=getParamValue("keepDims",t,r,a);return[n.max(getParamValue("x",t,r,a),s,i)]}case"Mean":{let s=getParamValue("axis",t,r,a),i=getParamValue("keepDims",t,r,a);return[n.mean(getParamValue("x",t,r,a),s,i)]}case"Min":{let 
s=getParamValue("axis",t,r,a),i=getParamValue("keepDims",t,r,a);return[n.min(getParamValue("x",t,r,a),s,i)]}case"Sum":{let s=getParamValue("axis",t,r,a),i=getParamValue("keepDims",t,r,a);return[n.sum(getParamValue("x",t,r,a),s,i)]}case"All":{let s=getParamValue("axis",t,r,a),i=getParamValue("keepDims",t,r,a);return[n.all(getParamValue("x",t,r,a),s,i)]}case"Any":{let s=getParamValue("axis",t,r,a),i=getParamValue("keepDims",t,r,a);return[n.any(getParamValue("x",t,r,a),s,i)]}case"ArgMax":{let s=getParamValue("axis",t,r,a);return[n.argMax(getParamValue("x",t,r,a),s)]}case"ArgMin":{let s=getParamValue("axis",t,r,a);return[n.argMin(getParamValue("x",t,r,a),s)]}case"Prod":{let s=getParamValue("axis",t,r,a),i=getParamValue("keepDims",t,r,a);return[n.prod(getParamValue("x",t,r,a),s,i)]}case"Cumprod":{let s=getParamValue("axis",t,r,a),i=getParamValue("exclusive",t,r,a),o=getParamValue("reverse",t,r,a);return[n.cumprod(getParamValue("x",t,r,a),s,i,o)]}case"Cumsum":{let s=getParamValue("axis",t,r,a),i=getParamValue("exclusive",t,r,a),o=getParamValue("reverse",t,r,a);return[n.cumsum(getParamValue("x",t,r,a),s,i,o)]}case"Bincount":let s=getParamValue("x",t,r,a),i=getParamValue("weights",t,r,a),o=getParamValue("size",t,r,a);return[n.bincount(s,i,o)];case"DenseBincount":{let s=getParamValue("x",t,r,a),i=getParamValue("weights",t,r,a),o=getParamValue("size",t,r,a),l=getParamValue("binaryOutput",t,r,a);return[n.denseBincount(s,i,o,l)]}default:throw TypeError(`Node type ${t.op} is not implemented`)}})(t,r,a));case"slice_join":return s(()=>((t,r,a,n=eM)=>{switch(t.op){case"ConcatV2":case"Concat":{let s=getParamValue("n",t,r,a),i=getParamValue("axis",t,r,a),o=getParamValue("tensors",t,r,a);return o=o.slice(0,s),[n.concat(o,i)]}case"Gather":{let s=getParamValue("x",t,r,a),i=getParamValue("indices",t,r,a);return[n.gather(s,n.cast(i,"int32"),0)]}case"GatherV2":{let 
s=getParamValue("axis",t,r,a),i=getParamValue("batchDims",t,r,a),o=getParamValue("x",t,r,a),l=getParamValue("indices",t,r,a);return[n.gather(o,n.cast(l,"int32"),s,i)]}case"Reverse":{let s=getParamValue("dims",t,r,a),i=[];for(let t=0;t{let s=getParamValue("axis",t,r,a),i=getParamValue("tensors",t,r,a),o=i[0].shape,l=n.squeeze(i[0]).shape,u=i.map(t=>{let r=arraysEqual(t.shape,o);if(!r&&!arraysEqual(n.squeeze(t).shape,l))throw Error("the input tensors shape does not match");return r?t:n.reshape(t,o)});return[n.stack(u,s)]});case"Unpack":{let s=getParamValue("axis",t,r,a),i=getParamValue("tensor",t,r,a);return n.unstack(i,s)}case"Tile":{let s=getParamValue("reps",t,r,a);return[n.tile(getParamValue("x",t,r,a),s)]}case"Split":case"SplitV":{let s=getParamValue("axis",t,r,a),i=getParamValue("numOrSizeSplits",t,r,a),o=getParamValue("x",t,r,a);return n.split(o,i,s)}case"ScatterNd":{let s=getParamValue("indices",t,r,a),i=getParamValue("values",t,r,a),o=getParamValue("shape",t,r,a);return[n.scatterND(s,i,o)]}case"GatherNd":{let s=getParamValue("x",t,r,a),i=getParamValue("indices",t,r,a);return[n.gatherND(s,i)]}case"SparseToDense":{let s=getParamValue("sparseIndices",t,r,a),i=getParamValue("outputShape",t,r,a),o=getParamValue("sparseValues",t,r,a),l=getParamValue("defaultValue",t,r,a);return[n.sparseToDense(s,o,i,o.dtype===l.dtype?l:n.cast(l,o.dtype))]}case"TensorScatterUpdate":{let s=getParamValue("indices",t,r,a),i=getParamValue("values",t,r,a),o=getParamValue("tensor",t,r,a);return[n.tensorScatterUpdate(o,s,i)]}default:throw TypeError(`Node type ${t.op} is not implemented`)}})(t,r,a));case"sparse":return 
s(()=>((t,r,a,n=eM)=>{switch(t.op){case"SparseFillEmptyRows":{let{outputIndices:s,outputValues:i,emptyRowIndicator:o,reverseIndexMap:l}=n.sparse.sparseFillEmptyRows(getParamValue("indices",t,r,a),getParamValue("values",t,r,a),getParamValue("denseShape",t,r,a),getParamValue("defaultValue",t,r,a));return[s,i,o,l]}case"SparseReshape":{let{outputIndices:s,outputShape:i}=n.sparse.sparseReshape(getParamValue("inputIndices",t,r,a),getParamValue("inputShape",t,r,a),getParamValue("newShape",t,r,a));return[s,i]}case"SparseSegmentMean":return[n.sparse.sparseSegmentMean(getParamValue("data",t,r,a),getParamValue("indices",t,r,a),getParamValue("segmentIds",t,r,a))];case"SparseSegmentSum":return[n.sparse.sparseSegmentSum(getParamValue("data",t,r,a),getParamValue("indices",t,r,a),getParamValue("segmentIds",t,r,a))];default:throw TypeError(`Node type ${t.op} is not implemented`)}})(t,r,a));case"spectral":return s(()=>((t,r,a,n=eM)=>{switch(t.op){case"FFT":return[n.fft(getParamValue("x",t,r,a))];case"IFFT":return[n.ifft(getParamValue("x",t,r,a))];case"RFFT":return[n.rfft(getParamValue("x",t,r,a))];case"IRFFT":return[n.irfft(getParamValue("x",t,r,a))];default:throw TypeError(`Node type ${t.op} is not implemented`)}})(t,r,a));case"string":return 
s(()=>((t,r,a,n=eM)=>{switch(t.op){case"StaticRegexReplace":return[n.string.staticRegexReplace(getParamValue("input",t,r,a),getParamValue("pattern",t,r,a),getParamValue("rewrite",t,r,a),getParamValue("replaceGlobal",t,r,a))];case"StringNGrams":{let{nGrams:s,nGramsSplits:i}=n.string.stringNGrams(getParamValue("data",t,r,a),getParamValue("dataSplits",t,r,a),getParamValue("separator",t,r,a),getParamValue("nGramWidths",t,r,a),getParamValue("leftPad",t,r,a),getParamValue("rightPad",t,r,a),getParamValue("padWidth",t,r,a),getParamValue("preserveShortSequences",t,r,a));return[s,i]}case"StringSplit":{let{indices:s,values:i,shape:o}=n.string.stringSplit(getParamValue("input",t,r,a),getParamValue("delimiter",t,r,a),getParamValue("skipEmpty",t,r,a));return[s,i,o]}case"StringToHashBucketFast":return[n.string.stringToHashBucketFast(getParamValue("input",t,r,a),getParamValue("numBuckets",t,r,a))];default:throw TypeError(`Node type ${t.op} is not implemented`)}})(t,r,a));case"transformation":return s(()=>((t,r,a,n=eM)=>{switch(t.op){case"Cast":return[n.cast(getParamValue("x",t,r,a),getParamValue("dtype",t,r,a))];case"ExpandDims":{let s=getParamValue("axis",t,r,a);return[n.expandDims(getParamValue("x",t,r,a),s)]}case"Squeeze":{let s=getParamValue("axis",t,r,a);return[n.squeeze(getParamValue("x",t,r,a),s)]}case"Reshape":return[n.reshape(getParamValue("x",t,r,a),getParamValue("shape",t,r,a))];case"EnsureShape":return[n.ensureShape(getParamValue("x",t,r,a),getParamValue("shape",t,r,a))];case"MirrorPad":return[n.mirrorPad(getParamValue("x",t,r,a),getParamValue("padding",t,r,a),getParamValue("mode",t,r,a))];case"PadV2":case"Pad":return[n.pad(getParamValue("x",t,r,a),getParamValue("padding",t,r,a),getParamValue("constantValue",t,r,a))];case"SpaceToBatchND":{let s=getParamValue("blockShape",t,r,a),i=getParamValue("paddings",t,r,a);return[n.spaceToBatchND(getParamValue("x",t,r,a),s,i)]}case"BatchToSpaceND":{let 
s=getParamValue("blockShape",t,r,a),i=getParamValue("crops",t,r,a);return[n.batchToSpaceND(getParamValue("x",t,r,a),s,i)]}case"DepthToSpace":{let s=getParamValue("blockSize",t,r,a),i=getParamValue("dataFormat",t,r,a).toUpperCase();return[n.depthToSpace(getParamValue("x",t,r,a),s,i)]}case"BroadcastTo":return[n.broadcastTo(getParamValue("x",t,r,a),getParamValue("shape",t,r,a))];case"BroadcastArgs":return[n.broadcastArgs(getParamValue("s0",t,r,a),getParamValue("s1",t,r,a))];default:throw TypeError(`Node type ${t.op} is not implemented`)}})(t,r,a));case"hash_table":return hash_table_executor_executeOp(t,r,a,n);case"custom":let i=lu[t.op];if(i&&i.customExecutor)return i.customExecutor(new NodeValueImpl(t,r,a));throw TypeError(`Custom op ${t.op} is not registered.`);default:throw TypeError(`Unknown op '${t.op}'. File an issue at https://github.com/tensorflow/tfjs/issues so we can add it, or register a custom execution with tf.registerOp()`)}})(t,r,a);return isPromise(i)?i.then(t=>[].concat(t)):[].concat(i)}let ExecutionContext=class ExecutionContext{constructor(t={},r={},a={},n={},s){this.weightMap=t,this.tensorArrayMap=r,this.tensorListMap=a,this.functionMap=n,this.parseNodeNameCache=s,this.rootContext={id:0,frameName:"",iterationId:0},this.contexts=[this.rootContext],this.lastId=0,this.generateCurrentContextIds()}newFrame(t,r){return{id:t,frameName:r,iterationId:0}}set currentContext(t){this.contexts!==t&&(this.contexts=t,this.generateCurrentContextIds())}get currentContext(){return this.contexts}get currentContextId(){return this._currentContextIds[0]}get currentContextIds(){return this._currentContextIds}generateCurrentContextIds(){let t=[];for(let 
r=0;r0===t.id&&0===t.iterationId?"":`${t.frameName}-${t.iterationId}`).join("/"):""}enterFrame(t){this.contexts&&(this.lastId++,this.contexts=this.contexts.slice(),this.contexts.push(this.newFrame(this.lastId,t)),this._currentContextIds.unshift(this.contextIdforContexts(this.contexts)))}exitFrame(){if(this.contexts&&this.contexts.length>1)this.contexts=this.contexts.slice(),this.contexts.splice(-1),this.currentContextIds.shift();else throw Error("Cannot exit frame, the context is empty")}nextIteration(){if(this.contexts&&this.contexts.length>0){this.contexts=this.contexts.slice(),this.lastId++;let t=Object.assign({},this.contexts[this.contexts.length-1]);t.iterationId+=1,t.id=this.lastId,this.contexts.splice(-1,1,t),this._currentContextIds.splice(0,1,this.contextIdforContexts(this.contexts))}else throw Error("Cannot increase frame iteration, the context is empty")}getWeight(t){return this.weightMap[t]}addTensorArray(t){this.tensorArrayMap[t.id]=t}getTensorArray(t){return this.tensorArrayMap[t]}addTensorList(t){this.tensorListMap[t.id]=t}getTensorList(t){return this.tensorListMap[t]}dispose(t){for(let r in this.tensorArrayMap)this.tensorArrayMap[r].clearAndClose(t);for(let r in this.tensorListMap)this.tensorListMap[r].clearAndClose(t)}};function getExecutionSubgraph(t,r,a,n){let s=new Set,i=[],o=null,l=null,u=new Set,p=new Set(Object.keys(t).map(t=>parseNodeName(t)[0])),m=new Set((n=n||[]).map(t=>parseNodeName(t.name)[0])),y=[...r];for(;y.length>0;){let t=y.pop();if((isControlFlow(t)||isDynamicShape(t)||isHashTable(t))&&null==o&&(l=(o=t).children.map(t=>t.name).filter(t=>s.has(t))),s.add(t.name),!(null!=a[t.name]||p.has(t.name)||m.has(t.name))){if(0===t.inputs.length){i.push(t.name);continue}t.inputs.forEach(t=>{u.has(t.name)||(u.add(t.name),y.push(t))})}}return{inputs:t,outputs:r,usedNodes:s,missingInputs:i,dynamicNode:o,syncInputs:l}}function 
getNodesInTopologicalOrder(t,r){let{usedNodes:a,inputs:n}=r,s=Object.keys(n).map(t=>parseNodeName(t)[0]).map(r=>t.nodes[r]),i=t.initNodes||[],isUsed=t=>a.has("string"==typeof t?t:t.name);function unique(t){return[...new Map(t.map(t=>[t.name,t])).values()]}let o=unique([...s,...t.weights,...i]).filter(isUsed),l=unique([...o,...Object.values(t.nodes)]).filter(isUsed),u=new Map(l.map(t=>[t.name,t])),p={};for(let t of l)for(let r of(p[t.name]=p[t.name]||0,t.children))isUsed(r)||(p[r.name]=1/0),p[r.name]=(p[r.name]||0)+1;let m=Object.entries(p).filter(([,t])=>0===t).map(([t])=>t),y=[...m];for(;m.length>0;){let t=m.pop();for(let r of u.get(t).children.filter(isUsed))0==--p[r.name]&&(y.push(r.name),m.push(r.name))}let _=filterPredefinedReachableNodes(y.map(t=>u.get(t)),o);return validateNodesExecutionOrder(_,o),_}function filterPredefinedReachableNodes(t,r){let a=new Map(t.map(t=>[t.name,t])),n=r.map(t=>t.name),s=new Set(n);for(;n.length>0;){let t=n.pop();for(let r of a.get(t).children)!a.has(r.name)||s.has(r.name)||(s.add(r.name),n.push(r.name))}return t.filter(t=>s.has(t.name))}let NodesExecutionOrderError=class NodesExecutionOrderError extends Error{constructor(t){super(`NodesExecutionOrderError: ${t}`)}};function validateNodesExecutionOrder(t,r){let a=new Map(t.map((t,r)=>[t.name,r])),n=new Set(r.map(t=>t.name)),isPredefined=t=>n.has("string"==typeof t?t:t.name),s=new Set(t.map(t=>t.name)),willBeExecuted=t=>s.has("string"==typeof t?t:t.name);for(let r of t){for(let t of r.children.filter(willBeExecuted)){if(!a.has(t.name))throw new NodesExecutionOrderError(`Child ${t.name} of node ${r.name} is unreachable.`);if(a.get(r.name)>a.get(t.name))throw new NodesExecutionOrderError(`Node ${r.name} is scheduled to run after its child ${t.name}.`)}if(!isPredefined(r))for(let t of r.inputs){if(!a.has(t.name))throw new NodesExecutionOrderError(`Input ${t.name} of node ${r.name} is unreachable.`);if(a.get(t.name)>a.get(r.name))throw new NodesExecutionOrderError(`Node ${r.name} is 
scheduled to run before its input ${t.name}.`)}}}function getNodeLiveUntilMap(t){let r=new Map(t.map((t,r)=>[t.name,r])),a=Number.MAX_SAFE_INTEGER,n=t.map((t,r)=>isControlFlow(t)?a:r),getSelfLifeSpan=t=>{let a=n[r.get(t.name)];return null==a?-1:a},s=t.map((t,r)=>t.children.map(getSelfLifeSpan).reduce((t,r)=>Math.max(t,r),n[r])),i=new Map;for(let r=0;rt[r].map(t=>t.id));this._weightIds=[].concat(...r),this._weightMap=t}set resourceManager(t){this._resourceManager=t}get inputs(){return this._inputs.map(t=>({name:t.name,shape:t.attrParams.shape?t.attrParams.shape.value:void 0,dtype:t.attrParams.dtype?t.attrParams.dtype.value:void 0}))}get outputs(){return this._outputs.map(t=>({name:t.name,shape:t.attrParams.shape?t.attrParams.shape.value:void 0,dtype:t.attrParams.dtype?t.attrParams.dtype.value:void 0}))}get inputNodes(){return this._inputs.map(t=>t.signatureKey||t.name)}get outputNodes(){return this._outputs.map(t=>{let r=t.signatureKey||t.name;return t.defaultOutput?`${r}:${t.defaultOutput}`:r})}get functions(){return Object.keys(this._functions).reduce((t,r)=>(t[r]=this._functions[r].signature,t),{})}constructor(t,r){this.graph=t,this.parent=r,this.compiledMap=new Map,this.parseNodeNameCache=new Map,this._weightMap={},this.SEPARATOR=",",this._functions={},this._functionExecutorMap={},this.keepIntermediateTensors=!1,this._outputs=t.outputs,this._inputs=t.inputs,this._initNodes=t.initNodes,this._signature=t.signature,this._functions=t.functions,null!=t.functions&&Object.keys(t.functions).forEach(r=>{this._functionExecutorMap[r]=new GraphExecutor(t.functions[r],this)})}getCompilationKey(t,r){let a=t.map(t=>t.name).sort(),n=r.map(t=>t.name).sort();return a.join(this.SEPARATOR)+"--"+n.join(this.SEPARATOR)}compile(t,r){let a=getExecutionSubgraph(t,r,this.weightMap,this._initNodes),{missingInputs:n,dynamicNode:s,syncInputs:i}=a;if(null!=s)throw Error(`This execution contains the node '${s.name}', which has the dynamic op '${s.op}'. Please use model.executeAsync() instead. 
Alternatively, to avoid the dynamic ops, specify the inputs [${i}]`);if(n.length>0){let a=r.map(t=>t.name),s=Object.keys(t);throw Error(`Cannot compute the outputs [${a}] from the provided inputs [${s}]. Missing the following inputs: [${n}]`)}let o=getNodesInTopologicalOrder(this.graph,a),l=getNodeLiveUntilMap(o);return{orderedNodes:o,nodeLiveUntilMap:l}}cloneAndKeepTensor(t){if(null==t)return null;let r=t.clone();return keep(r),r}cloneTensorList(t){return t?t.map(t=>this.cloneAndKeepTensor(t)):null}cloneTensorMap(t){return Object.fromEntries(Object.entries(t).map(([t,r])=>[t,this.cloneTensorList(r)]))}execute(t,r){this.disposeIntermediateTensors();let a=Object.keys(t=this.mapInputs(t)).sort();this.checkInputs(t),this.checkInputShapeAndType(t),r=this.mapOutputs(r),this.checkOutputs(r);let n=a.map(t=>this.graph.nodes[parseNodeName(t)[0]]),s=r.map(t=>parseNodeName(t)[0]),i=new Set(s),o=s.map(t=>this.graph.nodes[t]);0===o.length&&(o=this._outputs);let l=this.getCompilationKey(n,o),u=this.compiledMap.get(l);null==u&&(u=this.compile(t,o),this.compiledMap.set(l,u));try{this.keepIntermediateTensors=eV.getBool("KEEP_INTERMEDIATE_TENSORS")}catch(t){this.keepIntermediateTensors=!1,console.warn(t.message)}let p={},m={};return globals_tidy(()=>{let a=new ExecutionContext(this.weightMap,p,m,this.functionExecutorMap,this.parseNodeNameCache),n=Object.assign({},this.weightMap);this.keepIntermediateTensors&&(this.clonedTensorsMap=this.cloneTensorMap(this.weightMap)),Object.keys(t).forEach(r=>{let[s,i]=parseNodeName(r,a),o=[];o[i]=t[r],n[s]=o,this.keepIntermediateTensors&&(this.clonedTensorsMap[s]=this.cloneTensorList(o))});let s=this.getFrozenTensorIds(n),{orderedNodes:o,nodeLiveUntilMap:l}=u;for(let t of o){if(n[t.name])continue;let r=operation_executor_executeOp(t,n,a,this._resourceManager);if(isPromise(r))throw Error(`The execution of the op '${t.op}' returned a promise. 
Please use model.executeAsync() instead.`);n[t.name]=r,this.keepIntermediateTensors&&(this.clonedTensorsMap[t.name]=this.cloneTensorList(r)),this.checkTensorForDisposalWithNodeLiveUntilInfo(t,n,a,s,i,l.get(t.name))}return null==this.parent&&a.dispose(s),r.map(t=>getTensor(t,n,a))})}getFrozenTensorIds(t){return new Set([].concat.apply([],Object.keys(t).map(r=>t[r]).map(t=>t.map(t=>t.id))))}checkTensorForDisposal(t,r,a,n,s,i,o){if(!(isControlFlow(r)||i.has(t))){for(let n of a[t])null!=n&&(o[n.id]=(o[n.id]||0)+r.children.length);for(let t of r.inputs){if(isControlFlow(t))continue;let r=getTensorsForCurrentContext(t.name,a,n);if(null!=r)for(let t of r){if(!t||t.kept||s.has(t.id))continue;let r=o[t.id];1===r?(t.dispose(),delete o[t.id]):null!=r&&o[t.id]--}}}}checkTensorForDisposalWithNodeLiveUntilInfo(t,r,a,n,s,i){function isNonDisposableNode(t){return isControlFlow(t)||s.has(t.name)}if(!isControlFlow(t)&&null!=i){for(let t of i)if(!isNonDisposableNode(t))for(let s of getTensorsForCurrentContext(t.name,r,a))!s||s.kept||n.has(s.id)||s.dispose()}}async executeAsync(t,r){return this._executeAsync(t,r)}disposeIntermediateTensors(){this.clonedTensorsMap&&(Object.values(this.clonedTensorsMap).forEach(t=>{for(let r of t)r&&!r.isDisposed&&r.dispose()}),this.clonedTensorsMap=null)}getIntermediateTensors(){return this.clonedTensorsMap}async _executeAsync(t,r,a=!1,n={},s={}){this.disposeIntermediateTensors(),a||(t=this.mapInputs(t),this.checkInputs(t),this.checkInputShapeAndType(t),r=this.mapOutputs(r),this.checkOutputs(r));try{this.keepIntermediateTensors=eV.getBool("KEEP_INTERMEDIATE_TENSORS")}catch(t){this.keepIntermediateTensors=!1,console.warn(t.message)}let i=new ExecutionContext(this.weightMap,n,s,this.functionExecutorMap,this.parseNodeNameCache);this.keepIntermediateTensors&&(this.clonedTensorsMap=this.cloneTensorMap(this.weightMap));let o=await this.executeWithControlFlow(t,i,r,a),l=r.map(t=>getTensor(t,o,i)),u=new 
Set([...l.map(t=>t.id),...Object.keys(t).map(r=>t[r].id),...this.weightIds]);return Object.values(o).forEach(t=>{t.forEach(t=>{!t||t.isDisposed||u.has(t.id)||t.dispose()})}),null==this.parent&&i.dispose(u),l}async executeFunctionAsync(t,r,a){let n=t.reduce((t,r,a)=>(t[this.inputs[a].name]=r,t),{});return this._executeAsync(n,this.outputNodes,!0,r,a)}async executeWithControlFlow(t,r,a,n){let s=Object.keys(t),i=s.map(t=>this.graph.nodes[parseNodeName(t)[0]]),o=a.map(t=>parseNodeName(t)[0]),l=new Set(o),u=o.map(t=>this.graph.nodes[t]);0===u.length&&(u=this._outputs);let{usedNodes:p,missingInputs:m,dynamicNode:y,syncInputs:_}=getExecutionSubgraph(t,u,this.weightMap,this._initNodes),w=[...i,...this.graph.weights,...this._initNodes||[]].map(t=>({node:t,contexts:r.currentContext})),I=Object.assign({},this.weightMap);Object.keys(t).forEach(r=>{let[a,n]=parseNodeName(r),s=[];s[n]=t[r],I[a]=s});let C={},E=this.getFrozenTensorIds(I),A={};for(;w.length>0;){let t=this.processStack(i,w,r,I,A,E,l,C,p);await Promise.all(t)}null!=y||n||console.warn("This model execution did not contain any nodes with control flow or dynamic output shapes. You can use model.execute() instead.");let $=u.filter(t=>!isControlFlow(t)&&!getTensor(t.name,I,r)).map(t=>t.name);if($.length>0){let t="";throw null!=y&&(t=`Alternatively, to avoid the dynamic ops, use model.execute() and specify the inputs [${_}]`),Error(`Cannot compute the outputs [${$}] from the provided inputs [${s}]. Consider providing the following inputs: [${m}]. 
${t}`)}return I}processStack(t,r,a,n,s,i,o,l,u){let p=[];for(;r.length>0;){let t=r.pop();a.currentContext=t.contexts;let m="";if("Enter"===t.node.op&&getParamValue("isConstant",t.node,n,a)&&([m]=getNodeNameAndIndex(t.node.name,a)),null==n[t.node.name]){let y=operation_executor_executeOp(t.node,n,a,this._resourceManager);m||([m]=getNodeNameAndIndex(t.node.name,a));let _=a.currentContext;isPromise(y)?p.push(y.then(p=>(n[m]=p,this.keepIntermediateTensors&&(this.clonedTensorsMap[m]=this.cloneTensorList(p)),a.currentContext=_,this.checkTensorForDisposal(m,t.node,n,a,i,o,l),this.processChildNodes(t.node,r,a,n,s,u),p))):(n[m]=y,this.keepIntermediateTensors&&(this.clonedTensorsMap[m]=this.cloneTensorList(y)),this.checkTensorForDisposal(m,t.node,n,a,i,o,l),this.processChildNodes(t.node,r,a,n,s,u))}else this.processChildNodes(t.node,r,a,n,s,u)}return p}processChildNodes(t,r,a,n,s,i){t.children.forEach(t=>{let[o]=getNodeNameAndIndex(t.name,a);!s[o]&&i.has(t.name)&&("Merge"===t.op?t.inputNames.some(t=>!!getTensor(t,n,a))&&(s[o]=!0,r.push({contexts:a.currentContext,node:t})):t.inputNames.every(t=>!!getTensor(t,n,a))&&(s[o]=!0,r.push({contexts:a.currentContext,node:t})))})}dispose(){Object.keys(this.weightMap).forEach(t=>this.weightMap[t].forEach(t=>t.dispose()))}checkInputShapeAndType(t){Object.keys(t).forEach(r=>{let a=t[r],[n]=parseNodeName(r),s=this.graph.nodes[n];if(s.attrParams.shape&&s.attrParams.shape.value){let t=s.attrParams.shape.value;assert(t.length===a.shape.length&&a.shape.every((r,a)=>-1===t[a]||t[a]===r),()=>`The shape of dict['${s.name}'] provided in model.execute(dict) must be [${t}], but was [${a.shape}]`)}s.attrParams.dtype&&s.attrParams.dtype.value&&assert(a.dtype===s.attrParams.dtype.value,()=>`The dtype of dict['${s.name}'] provided in model.execute(dict) must be ${s.attrParams.dtype.value}, but was ${a.dtype}`)})}mapInputs(t){var r,a;let n={};for(let s in t){let i=null==(a=null==(r=this._signature)?void 0:r.inputs)?void 
0:a[s];null!=i?n[i.name]=t[s]:n[s]=t[s]}return n}checkInputs(t){let r=Object.keys(t).filter(t=>{let[r]=parseNodeName(t);return null==this.graph.nodes[r]});if(r.length>0)throw Error(`The dict provided in model.execute(dict) has keys: [${r}] that are not part of graph`)}mapOutputs(t){return t.map(t=>{var r,a;let n=null==(a=null==(r=this._signature)?void 0:r.outputs)?void 0:a[t];return null!=n?n.name:t},{})}checkOutputs(t){t.forEach(t=>{let[r]=parseNodeName(t);if(!this.graph.nodes[r])throw Error(`The output '${t}' is not found in the graph`)})}};let ResourceManager=class ResourceManager{constructor(t={},r={}){this.hashTableNameToHandle=t,this.hashTableMap=r}addHashTable(t,r){this.hashTableNameToHandle[t]=r.handle,this.hashTableMap[r.id]=r}getHashTableHandleByName(t){return this.hashTableNameToHandle[t]}getHashTableById(t){return this.hashTableMap[t]}dispose(){for(let t in this.hashTableMap)this.hashTableMap[t].clearAndClose(),delete this.hashTableMap[t];for(let t in this.hashTableNameToHandle)this.hashTableNameToHandle[t].dispose(),delete this.hashTableNameToHandle[t]}};let GraphModel=class GraphModel{get modelVersion(){return this.version}get inputNodes(){return this.executor.inputNodes}get outputNodes(){return this.executor.outputNodes}get inputs(){return this.executor.inputs}get outputs(){return this.executor.outputs}get weights(){return this.executor.weightMap}get metadata(){return this.artifacts.userDefinedMetadata}get modelSignature(){return this.signature}get modelStructuredOutputKeys(){return this.structuredOutputKeys}constructor(t,r={},a=eh){this.modelUrl=t,this.loadOptions=r,this.version="n/a",this.io=a,null==r&&(this.loadOptions={}),this.resourceManager=new ResourceManager}findIOHandler(){let t=this.modelUrl;if(null!=t.load)this.handler=t;else if(null!=this.loadOptions.requestInit)this.handler=this.io.browserHTTPRequest(t,this.loadOptions);else{let 
r=this.io.getLoadHandlers(t,this.loadOptions);if(0===r.length)r.push(this.io.browserHTTPRequest(t,this.loadOptions));else if(r.length>1)throw Error(`Found more than one (${r.length}) load handlers for URL '${[t]}'`);this.handler=r[0]}}load(){if(this.findIOHandler(),null==this.handler.load)throw Error("Cannot proceed with model loading because the IOHandler provided does not have the `load` method implemented.");let t=this.handler.load();return isPromise(t)?t.then(t=>null==t.getWeightStream?this.loadSync(t):this.loadStreaming(t)):this.loadSync(t)}loadSync(t){let r=this.io.decodeWeights(t.weightData,t.weightSpecs);return this.loadWithWeightMap(t,r)}async loadStreaming(t){if(null==t.getWeightStream)throw Error("Model artifacts missing streamWeights function");let r=await decodeWeightsStream(t.getWeightStream(),t.weightSpecs);return this.loadWithWeightMap(t,r)}loadWithWeightMap(t,r){this.artifacts=t;let a=this.artifacts.modelTopology,n=this.artifacts.signature;if(null!=this.artifacts.userDefinedMetadata){let t=this.artifacts.userDefinedMetadata;null!=t.signature&&(n=t.signature),null!=t.structuredOutputKeys&&(this.structuredOutputKeys=t.structuredOutputKeys)}if(this.signature=n,this.version=`${a.versions.producer}.${a.versions.minConsumer}`,this.executor=new GraphExecutor(OperationMapper.Instance.transformGraph(a,this.signature)),this.executor.weightMap=this.convertTensorMapToTensorsMap(r),this.executor.resourceManager=this.resourceManager,null!=t.modelInitializer&&null!=t.modelInitializer.node){let r=OperationMapper.Instance.transformGraph(t.modelInitializer);this.initializer=new GraphExecutor(r),this.initializer.weightMap=this.executor.weightMap,this.initializer.resourceManager=this.resourceManager,this.initializerSignature=t.initializerSignature}return!0}async save(t,r){if("string"==typeof t){let r=this.io.getSaveHandlers(t);if(0===r.length)throw Error(`Cannot find any save handlers for URL '${t}'`);if(r.length>1)throw Error(`Found more than one (${r.length}) save 
handlers for URL '${t}'`);t=r[0]}if(null==t.save)throw Error("GraphModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");return t.save(this.artifacts)}addStructuredOutputNames(t){if(this.structuredOutputKeys){let r={};return(t instanceof tensor_Tensor?[t]:t).forEach((t,a)=>r[this.structuredOutputKeys[a]]=t),r}return t}predict(t,r){let a=this.execute(t,this.outputNodes);return this.addStructuredOutputNames(a)}async predictAsync(t,r){let a=await this.executeAsync(t,this.outputNodes);return this.addStructuredOutputNames(a)}normalizeInputs(t){var r;if(!(t instanceof tensor_Tensor)&&!Array.isArray(t)){let a=null==(r=this.signature)?void 0:r.inputs;if(null!=a)for(let r in a){let n=a[r];null!=n.resourceId&&(t[r]=this.resourceIdToCapturedInput[n.resourceId])}return t}t=Array.isArray(t)?t:[t];let a=Object.keys(this.resourceIdToCapturedInput).length;if(t.length+a!==this.inputNodes.length)throw Error(`Input tensor count mismatch, the graph model has ${this.inputNodes.length-a} non-resource placeholders, while there are ${t.length} input tensors provided.`);let n=0;return this.inputNodes.reduce((r,a)=>{var s,i,o;let l=null==(o=null==(i=null==(s=this.signature)?void 0:s.inputs)?void 0:i[a])?void 0:o.resourceId;return null!=l?r[a]=this.resourceIdToCapturedInput[l]:r[a]=t[n++],r},{})}normalizeOutputs(t){return Array.isArray(t=t||this.outputNodes)?t:[t]}executeInitializerGraph(){return null==this.initializer?[]:null==this.initializerSignature?this.initializer.execute({},[]):this.initializer.execute({},Object.keys(this.initializerSignature.outputs))}async executeInitializerGraphAsync(){return null==this.initializer?[]:null==this.initializerSignature?this.initializer.executeAsync({},[]):this.initializer.executeAsync({},Object.keys(this.initializerSignature.outputs))}setResourceIdToCapturedInput(t){if(this.resourceIdToCapturedInput={},this.initializerSignature){let r=this.initializerSignature.outputs,a=Object.keys(r);for(let 
n=0;n1?a:a[0]}async executeAsync(t,r){null==this.resourceIdToCapturedInput&&this.setResourceIdToCapturedInput(await this.executeInitializerGraphAsync()),t=this.normalizeInputs(t),r=this.normalizeOutputs(r);let a=await this.executor.executeAsync(t,r);return a.length>1?a:a[0]}getIntermediateTensors(){return this.executor.getIntermediateTensors()}disposeIntermediateTensors(){this.executor.disposeIntermediateTensors()}convertTensorMapToTensorsMap(t){return Object.keys(t).reduce((r,a)=>(r[a]=[t[a]],r),{})}dispose(){this.executor.dispose(),this.initializer&&(this.initializer.dispose(),this.resourceIdToCapturedInput&&globals_dispose(this.resourceIdToCapturedInput)),this.resourceManager.dispose()}};async function loadGraphModel(t,r={},a=eh){if(null==t)throw Error("modelUrl in loadGraphModel() cannot be null. Please provide a url or an IOHandler that loads the model");null==r&&(r={}),r.fromTFHub&&"string"==typeof t&&(t=getTFHubUrl(t));let n=new GraphModel(t,r,a);return await n.load(),n}function getTFHubUrl(t){return t.endsWith("/")||(t+="/"),`${t}model.json?tfjs-format=file`}function deepMap(t,r){return deepMapInternal(t,r)}function deepMapInternal(t,r,a=new Map,n=new Set){if(null==t)return null;if("function"==typeof Blob&&t instanceof Blob)return t.slice();if(n.has(t))throw Error("Circular references are not supported.");if(a.has(t))return a.get(t);let s=r(t);if(s.recurse&&null!==s.value)throw Error("A deep map function may not return both a value and recurse=true.");if(!s.recurse)return a.set(t,s.value),s.value;if(deep_map_isIterable(t)){let s=Array.isArray(t)?[]:{};for(let i in n.add(t),t){let o=deepMapInternal(t[i],r,a,n);s[i]=o}return n.delete(t),t.__proto__&&(s.__proto__=t.__proto__),s}throw Error(`Can't recurse into non-iterable type: ${t}`)}function deepZip(t,r=zipToList){return deepZipInternal(t,r)}function deepZipInternal(t,r,a=new Set){let n=t[0];if(a.has(n))throw Error("Circular references are not supported.");let s=r(t);if(s.recurse&&null!==s.value)throw 
Error("A deep zip function may not return both a value and recurse=true.");if(!s.recurse)return s.value;if(deep_map_isIterable(n)){let s=Array.isArray(n)?[]:{};for(let i in a.add(n),n){let n=deepZipInternal(t.map(t=>t[i]),r,a);s[i]=n}return a.delete(n),s}throw Error(`Can't recurse into non-iterable type: ${n}`)}function zipToList(t){return null===t?null:deep_map_isIterable(t[0])?{value:null,recurse:!0}:{value:t,recurse:!1}}function deep_map_isIterable(t){let r=!1;if(eV.get("IS_BROWSER"))r=t instanceof TextDecoder;else{let{StringDecoder:n}=a(0x5395223a);r=t instanceof n}return null!=t&&!ArrayBuffer.isView(t)&&(Array.isArray(t)||"object"==typeof t&&!(t instanceof tensor_Tensor)&&!(t instanceof Promise)&&!r)}function canTensorify(t){return null==t||isPrimitive(t)||Array.isArray(t)||"object"==typeof t&&t instanceof tensor_Tensor||isTypedArray(t)}function isPrimitive(t){return null===t||"object"!=typeof t&&"function"!=typeof t}function deepClone(t){return deepMap(t,cloneIfTensor)}function cloneIfTensor(t){return t instanceof tensor_Tensor?{value:t.clone(),recurse:!1}:deep_map_isIterable(t)?{value:null,recurse:!0}:{value:t,recurse:!1}}let RingBuffer=class RingBuffer{constructor(t){if(this.capacity=t,this.begin=0,this.end=0,null==t)throw RangeError("Can't create a ring buffer of unknown capacity.");if(t<1)throw RangeError("Can't create ring buffer of capacity < 1.");this.data=Array(t),this.doubledCapacity=2*t}wrap(t){for(;t<0;)t+=this.doubledCapacity;return t%this.doubledCapacity}get(t){if(t<0)throw RangeError("Can't get item at a negative index.");return this.data[t%this.capacity]}set(t,r){if(t<0)throw RangeError("Can't set item at a negative index.");this.data[t%this.capacity]=r}length(){let t=this.end-this.begin;return t<0&&(t=this.doubledCapacity+t),t}isFull(){return this.length()===this.capacity}isEmpty(){return 0===this.length()}push(t){if(this.isFull())throw RangeError("Ring buffer is full.");this.set(this.end,t),this.end=this.wrap(this.end+1)}pushAll(t){for(let r 
of t)this.push(r)}pop(){if(this.isEmpty())throw RangeError("Ring buffer is empty.");this.end=this.wrap(this.end-1);let t=this.get(this.end);return this.set(this.end,void 0),t}unshift(t){if(this.isFull())throw RangeError("Ring buffer is full.");this.begin=this.wrap(this.begin-1),this.set(this.begin,t)}shift(){if(this.isEmpty())throw RangeError("Ring buffer is empty.");let t=this.get(this.begin);return this.set(this.begin,void 0),this.begin=this.wrap(this.begin+1),t}shuffleExcise(t){if(this.isEmpty())throw RangeError("Ring buffer is empty.");let r=this.wrap(this.begin+t),a=this.get(r);return this.set(r,this.pop()),a}};let GrowingRingBuffer=class GrowingRingBuffer extends RingBuffer{constructor(){super(GrowingRingBuffer.INITIAL_CAPACITY)}isFull(){return!1}push(t){super.isFull()&&this.expand(),super.push(t)}unshift(t){super.isFull()&&this.expand(),super.unshift(t)}expand(){let t=2*this.capacity,r=Array(t),a=this.length();for(let t=0;t!0===t)}rowMajorBatch(t,r=!0){return new RowMajorBatchIterator(this,t,r)}columnMajorBatch(t,r=!0,a=zipToList){return this.rowMajorBatch(t,r).map(t=>deepZip(t,a))}concatenate(t,r){return new ChainedIterator(lazy_iterator_iteratorFromItems([this,t]),r)}take(t){return t<0||null==t?this:new TakeIterator(this,t)}skip(t){return t<0||null==t?this:new SkipIterator(this,t)}prefetch(t){return new PrefetchIterator(this,t)}shuffle(t,r){return new ShuffleIterator(this,t,r)}serial(){return new SerialIterator(this)}};let ArrayIterator=class ArrayIterator extends LazyIterator{constructor(t){super(),this.items=t,this.trav=0}summary(){return`Array of ${this.items.length} items`}async next(){if(this.trav>=this.items.length)return{value:null,done:!0};let t=this.items[this.trav];return this.trav++,{value:deepClone(t),done:!1}}};let FunctionCallIterator=class FunctionCallIterator extends LazyIterator{constructor(t){super(),this.nextFn=t}summary(){return"Function call"}async next(){try{return this.nextFn()}catch(t){throw t.message=`Error thrown while iterating 
through a dataset: ${t.message}`,t}}};let SerialIterator=class SerialIterator extends LazyIterator{constructor(t){super(),this.upstream=t,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Serial`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){return this.upstream.next()}};let SkipIterator=class SkipIterator extends LazyIterator{constructor(t,r){super(),this.upstream=t,this.maxCount=r,this.count=0,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Skip`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;this.count++ Take`}async next(){return this.count++>=this.maxCount?{value:null,done:!0}:this.upstream.next()}};let RowMajorBatchIterator=class RowMajorBatchIterator extends LazyIterator{constructor(t,r,a=!0){super(),this.upstream=t,this.batchSize=r,this.enableSmallLastBatch=a,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> RowMajorBatch`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){let t=[];for(;t.length0)return{value:t,done:!1};return{value:null,done:!0}}t.push(r.value)}return{value:t,done:!1}}};let FilterIterator=class FilterIterator extends LazyIterator{constructor(t,r){super(),this.upstream=t,this.predicate=r,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Filter`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;;){let t=await this.upstream.next();if(t.done||this.predicate(t.value))return t;globals_dispose(t.value)}}};let MapIterator=class MapIterator extends LazyIterator{constructor(t,r){super(),this.upstream=t,this.transform=r}summary(){return`${this.upstream.summary()} -> Map`}async next(){let t=await 
this.upstream.next();if(t.done)return{value:null,done:!0};let r=getTensorsInContainer(t.value),a=this.transform(t.value),n=getTensorsInContainer(a);for(let t of r)isTensorInList(t,n)||t.dispose();return{value:a,done:!1}}};let ErrorHandlingLazyIterator=class ErrorHandlingLazyIterator extends LazyIterator{constructor(t,r){super(),this.upstream=t,this.handler=r,this.count=0,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> handleErrors`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;;)try{return await this.upstream.next()}catch(t){if(!this.handler(t))return{value:null,done:!0}}}};let AsyncMapIterator=class AsyncMapIterator extends LazyIterator{constructor(t,r){super(),this.upstream=t,this.transform=r}summary(){return`${this.upstream.summary()} -> AsyncMap`}async next(){let t=await this.upstream.next();if(t.done)return{value:null,done:!0};let r=getTensorsInContainer(t.value),a=await this.transform(t.value),n=getTensorsInContainer(a);for(let t of r)isTensorInList(t,n)||t.dispose();return{value:a,done:!1}}};let OneToManyIterator=class OneToManyIterator extends LazyIterator{constructor(){super(),this.outputQueue=new GrowingRingBuffer,this.lastRead=Promise.resolve({value:null,done:!1})}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;0===this.outputQueue.length();)if(!await this.pump())return{value:null,done:!0};return{value:this.outputQueue.shift(),done:!1}}};let FlatmapIterator=class FlatmapIterator extends OneToManyIterator{constructor(t,r){super(),this.upstream=t,this.transform=r}summary(){return`${this.upstream.summary()} -> Flatmap`}async pump(){let t=await this.upstream.next();if(t.done)return!1;let r=getTensorsInContainer(t.value),a=this.transform(t.value),n=getTensorsInContainer(a);for(let t of(this.outputQueue.pushAll(a),r))isTensorInList(t,n)||t.dispose();return!0}};let 
ChainedIterator=class ChainedIterator extends LazyIterator{constructor(t,r){super(),this.baseErrorHandler=r,this.lastRead=null,this.iterator=null,this.moreIterators=t}summary(){return"TODO: fill in upstream of chained summaries -> Chained"}async next(){return this.lastRead=this.readFromChain(this.lastRead),this.lastRead}async readFromChain(t){if(await t,null==this.iterator){let t=await this.moreIterators.next();if(t.done)return{value:null,done:!0};this.iterator=t.value,null!=this.baseErrorHandler&&(this.iterator=this.iterator.handleErrors(this.baseErrorHandler))}let r=await this.iterator.next();return r.done?(this.iterator=null,this.readFromChain(t)):r}};(L=ei||(ei={}))[L.FAIL=0]="FAIL",L[L.SHORTEST=1]="SHORTEST",L[L.LONGEST=2]="LONGEST";let PrefetchIterator=class PrefetchIterator extends LazyIterator{constructor(t,r){super(),this.upstream=t,this.bufferSize=r,this.buffer=new RingBuffer(r)}summary(){return`${this.upstream.summary()} -> Prefetch`}refill(){for(;!this.buffer.isFull();){let t=this.upstream.next();this.buffer.push(t)}}next(){return this.refill(),this.buffer.shift()}};let ShuffleIterator=class ShuffleIterator extends PrefetchIterator{constructor(t,r,a){super(t,r),this.upstream=t,this.windowSize=r,this.upstreamExhausted=!1,this.random=sG.alea(a||util_now().toString()),this.lastRead=Promise.resolve({value:null,done:!1})}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}randomInt(t){return Math.floor(this.random()*t)}chooseIndex(){return this.randomInt(this.buffer.length())}async serialNext(){for(this.upstreamExhausted||this.refill();!this.buffer.isEmpty();){let t=this.chooseIndex(),r=await this.buffer.shuffleExcise(t);if(!r.done)return this.refill(),r;this.upstreamExhausted=!0}return{value:null,done:!0}}};let Dataset=class Dataset{constructor(){this.size=null}batch(t,r=!0){let a=this;return assert(t>0,()=>`batchSize needs to be positive, but it is ${t}`),dataset_datasetFromIteratorFn(async()=>(await 
a.iterator()).columnMajorBatch(t,r,deepBatchConcat),this.size===1/0||null==this.size?this.size:r?Math.ceil(this.size/t):Math.floor(this.size/t))}concatenate(t){let r=this;return dataset_datasetFromIteratorFn(async()=>(await r.iterator()).concatenate(await t.iterator()),this.size===1/0||t.size===1/0?1/0:null!=this.size&&null!=t.size?this.size+t.size:null)}filter(t){let r=this;return dataset_datasetFromIteratorFn(async()=>(await r.iterator()).filter(r=>globals_tidy(()=>t(r))),this.size===1/0?1/0:null)}async forEachAsync(t){return(await this.iterator()).forEachAsync(t)}map(t){let r=this;return dataset_datasetFromIteratorFn(async()=>(await r.iterator()).map(r=>globals_tidy(()=>t(r))),this.size)}mapAsync(t){let r=this;return dataset_datasetFromIteratorFn(async()=>(await r.iterator()).mapAsync(t),this.size)}prefetch(t){if(null==t)throw RangeError("`Dataset.prefetch()` requires bufferSize to be specified.");let r=this;return dataset_datasetFromIteratorFn(async()=>(await r.iterator()).prefetch(t),this.size)}repeat(t){let r=this;return dataset_datasetFromIteratorFn(async()=>iteratorFromConcatenated(lazy_iterator_iteratorFromFunction(async()=>({value:await r.iterator(),done:!1})).take(t)),null!=this.size&&t>0?this.size*t:0===t?0:null!=this.size&&(void 0===t||t<0)?1/0:null)}skip(t){let r=this;return dataset_datasetFromIteratorFn(async()=>(await r.iterator()).skip(t),null!=this.size&&t>=0&&this.size>=t?this.size-t:null!=this.size&&(this.size{let r=s.int32();return a&&(r+=s.int32()),(await n.iterator()).shuffle(t,r.toString())},this.size)}take(t){let r=this;return dataset_datasetFromIteratorFn(async()=>(await r.iterator()).take(t),null!=this.size&&this.size>t?t:null!=this.size&&this.size<=t?this.size:null)}async toArray(){if(this.size===1/0)throw Error("Can not convert infinite data stream to array.");return(await this.iterator()).toArray()}async toArrayForTest(){if(this.size===1/0)throw Error("Can not convert infinite data stream to array.");return(await 
this.iterator()).toArrayForTest()}};function dataset_datasetFromIteratorFn(t,r=null){return new class extends Dataset{constructor(){super(...arguments),this.size=r}async iterator(){return t()}}}function deepBatchConcat(t){return null===t?null:canTensorify(t[0])?{value:batchConcat(t),recurse:!1}:{value:null,recurse:!0}}function batchConcat(t){if(0===t.length)throw Error("Can't make a batch of zero elements.");return t[0]instanceof tensor_Tensor?ig(t):tensor(t)}Dataset.MAX_BUFFER_SIZE=1e4,Symbol("out"),Symbol("field"),Symbol("quote"),Symbol("quoteafterquote"),Symbol("quoteinquote");let StringIterator=class StringIterator extends LazyIterator{split(t){return new SplitIterator(this,t)}};let SplitIterator=class SplitIterator extends StringIterator{constructor(t,r){super(),this.upstream=t,this.impl=new SplitIteratorImpl(t,r)}summary(){return this.impl.summary()}async next(){return this.impl.next()}};let SplitIteratorImpl=class SplitIteratorImpl extends OneToManyIterator{constructor(t,r){super(),this.upstream=t,this.separator=r,this.carryover=""}summary(){return`${this.upstream.summary()} -> Split('${this.separator}')`}async pump(){let t=await this.upstream.next();if(t.done)return""!==this.carryover&&(this.outputQueue.push(this.carryover),this.carryover="",!0);let r=t.value.split(this.separator);for(let t of(r[0]=this.carryover+r[0],r.slice(0,-1)))this.outputQueue.push(t);return this.carryover=r[r.length-1],!0}};function assertNotComplex(t,r){Array.isArray(t)||(t=[t]),t.forEach(t=>{null!=t&&assert("complex64"!==t.dtype,()=>`${r} does not support complex64 tensors in the CPU backend.`)})}let lF=whereImpl;let MathBackendCPU=class MathBackendCPU extends KernelBackend{nextDataId(){return MathBackendCPU.nextDataId++}constructor(){super(),this.blockSize=48,this.firstUse=!0,this.data=new DataStorage(this,ay)}write(t,r,a){this.firstUse&&(this.firstUse=!1,eV.get("IS_NODE")&&warn(` ============================ Hi, looks like you are running TensorFlow.js in Node.js. 
To speed things up dramatically, install our node backend, visit https://github.com/tensorflow/tfjs-node for more details. ============================`));let n={id:this.nextDataId()};return this.data.set(n,{values:t,dtype:a,refCount:1}),n}makeTensorInfo(t,r,a){let n;if("string"===r&&null!=a&&a.length>0&&isString(a[0])){let s=a.map(t=>encodeString(t));n=this.write(s,t,r)}else n=this.write(a,t,r);return{dataId:n,shape:t,dtype:r}}refCount(t){return this.data.has(t)?this.data.get(t).refCount:0}incRef(t){let r=this.data.get(t);r.refCount++}decRef(t){if(this.data.has(t)){let r=this.data.get(t);r.refCount--}}move(t,r,a,n,s){this.data.set(t,{values:r,dtype:n,refCount:s})}numDataIds(){return this.data.numDataIds()}async read(t){return this.readSync(t)}readSync(t){let{dtype:r,complexTensorInfos:a}=this.data.get(t);return"complex64"===r?mergeRealAndImagArrays(this.readSync(a.real.dataId),this.readSync(a.imag.dataId)):convertBackendValuesAndArrayBuffer(this.data.get(t).values,r)}bufferSync(t){let r=this.readSync(t.dataId);if("string"===t.dtype)try{let a=r.map(t=>decodeString(t));return buffer(t.shape,t.dtype,a)}catch{throw Error("Failed to decode encoded string bytes into utf-8")}return buffer(t.shape,t.dtype,r)}makeOutput(t,r,a){return ay.makeTensorFromTensorInfo(this.makeTensorInfo(r,a,t),this)}disposeData(t,r=!1){if(this.data.has(t)){if(this.data.get(t).refCount--,!r&&this.data.get(t).refCount>0)return!1;let{complexTensorInfos:a}=this.data.get(t);null!=a&&(this.disposeData(a.real.dataId,!0),this.disposeData(a.imag.dataId,!0)),this.data.delete(t)}return!0}disposeIntermediateTensorInfo(t){this.disposeData(t.dataId)}async time(t){let r=util_now();return t(),{kernelMs:util_now()-r}}memory(){return{unreliable:!0,reasons:["The reported memory is an upper bound. 
Due to automatic garbage collection, the true allocated memory may be less."]}}where(t){assertNotComplex([t],"where");let r=this.readSync(t.dataId);return lF(t.shape,r)}dispose(){}floatPrecision(){return 32}epsilon(){return super.epsilon()}};function createSimpleUnaryImpl(t){return(r,a,n)=>{let s=getArrayFromDType(a,r.length);for(let a=0;a{let o,{x:l}=n;assertNotComplex(l,t);let u=i.data.get(l.dataId).values;if("string"===l.dtype){if(!Array.isArray(u))throw Error("String tensor's value was not an instance of Array");o=fromUint8ToStringArray(u)}else o=u;let p=a||l.dtype,m=r(o,p,s);return i.makeTensorInfo(l.shape,p,m)}}MathBackendCPU.nextDataId=0,registerBackend("cpu",()=>new MathBackendCPU,1);let lD=unaryKernelFunc("Elu",t=>t>=0?t:Math.exp(t)-1);function Identity_identity(t){let{inputs:r,backend:a}=t,{x:n}=r;return a.incRef(n.dataId),{dataId:n.dataId,shape:n.shape,dtype:n.dtype}}function LeakyRelu_leakyRelu(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{alpha:i}=n;assertNotComplex([s],"leakyRelu");let o=sizeFromShape(s.shape),l=a.data.get(s.dataId).values,u=getArrayFromDType("float32",o);for(let t=0;t{let o=assertAndGetBroadcastShape(r,a),l=o.length,u=computeStrides(o),p=getArrayFromDType(i,sizeFromShape(o)),m=r.length,y=a.length,_=computeStrides(r),w=computeStrides(a),I=getBroadcastDims(r,o),C=getBroadcastDims(a,o);if(I.length+C.length===0)for(let r=0;ri[t]=0);let o=locToIndex(i,m,_),E=a.slice(-y);C.forEach(t=>E[t]=0);let A=locToIndex(E,y,w);p[r]=t(n[o],s[A])}return[p,o]}}let lP=createSimpleBinaryKernelImpl((t,r)=>t<0?r*t:t);function Prelu_prelu(t){let{inputs:r,backend:a}=t,{x:n,alpha:s}=r;assertNotComplex([n,s],"prelu");let i=a.data.get(n.dataId).values,o=a.data.get(s.dataId).values,[l,u]=lP(n.shape,s.shape,i,o,"float32");return a.makeTensorInfo(u,"float32",l)}let lO=unaryKernelFunc(rx,t=>Math.max(0,t)),lM=unaryKernelFunc(rw,t=>Math.min(Math.max(0,t),6)),lL=createSimpleUnaryImpl(t=>1/(1+Math.exp(-t))),lz=unaryKernelFunc(rM,t=>1/(1+Math.exp(-t)));function 
fused_utils_applyActivation(t,r,a,n,s){if("linear"===a)return Identity_identity({inputs:{x:r},backend:t});if("relu"===a)return lO({inputs:{x:r},backend:t});if("elu"===a)return lD({inputs:{x:r},backend:t});if("relu6"===a)return lM({inputs:{x:r},backend:t});if("prelu"===a)return Prelu_prelu({inputs:{x:r,alpha:n},backend:t});else if("leakyrelu"===a)return LeakyRelu_leakyRelu({inputs:{x:r},backend:t,attrs:{alpha:s}});else if("sigmoid"===a)return lz({inputs:{x:r},backend:t});throw Error(`Activation ${a} has not been implemented for the CPU backend.`)}function Complex_complex(t){let{inputs:r,backend:a}=t,{real:n,imag:s}=r,i=a.data.get(n.dataId).values,o=a.data.get(s.dataId).values,l=a.makeTensorInfo(n.shape,"complex64");return a.data.get(l.dataId).complexTensorInfos={real:a.makeTensorInfo(n.shape,"float32",i),imag:a.makeTensorInfo(s.shape,"float32",o)},l}function zeros_impl_zeros(t,r,a="float32"){if("complex64"===a)return Complex_complex({inputs:{real:zeros_impl_zeros(t,r,"float32"),imag:zeros_impl_zeros(t,r,"float32")},backend:t});let n=makeZerosTypedArray(sizeFromShape(r),a);return t.makeTensorInfo(r,a,n)}function Real_real(t){let{inputs:r,backend:a}=t,{input:n}=r,s=a.data.get(n.dataId).complexTensorInfos.real,i=a.data.get(s.dataId).values;return a.makeTensorInfo(s.shape,s.dtype,i)}function castImpl(t,r,a,n){if("int32"===n)return[r,"int32",Int32Array.from(t)];if("bool"===n){let n=toTypedArray([0],a),[s,i]=createSimpleBinaryKernelImpl((t,r)=>+(t!==r))(r,[],t,n,"bool");return[i,"bool",s]}throw Error(`Error in Cast: failed to cast ${a} to ${n}`)}function Cast_cast(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{dtype:i}=n;if("complex64"===i){if("complex64"===s.dtype)return Identity_identity({inputs:{x:s},backend:a});let t=zeros_impl_zeros(a,s.shape,s.dtype),r=Cast_cast({inputs:{x:s},backend:a,attrs:{dtype:"float32"}}),n=Complex_complex({inputs:{real:r,imag:t},backend:a});return 
a.disposeIntermediateTensorInfo(t),a.disposeIntermediateTensorInfo(r),n}if("complex64"===s.dtype){let t=Real_real({inputs:{input:s},backend:a}),r=Cast_cast({inputs:{x:t},backend:a,attrs:{dtype:i}});return a.disposeIntermediateTensorInfo(t),r}if(!hasEncodingLoss(s.dtype,i)){let t=Identity_identity({inputs:{x:s},backend:a});return{dataId:t.dataId,shape:t.shape,dtype:i}}let[o,l,u]=castImpl(a.data.get(s.dataId).values,s.shape,s.dtype,i);return a.makeTensorInfo(o,l,u)}function binaryKernelFunc(t,r,a,n){return null==a?({inputs:a,backend:s})=>{let{a:i,b:o}=a;assertNotComplex([i,o],t);let l=s.data.get(i.dataId).values,u=s.data.get(o.dataId).values,p="string"===i.dtype?fromUint8ToStringArray(l):l,m="string"===i.dtype?fromUint8ToStringArray(u):u,y=n||i.dtype,[_,w]=r(i.shape,o.shape,p,m,y);return s.makeTensorInfo(w,y,_)}:({inputs:t,backend:s})=>{let{a:i,b:o}=t;if("complex64"===i.dtype||"complex64"===o.dtype){let t=Cast_cast({inputs:{x:i},backend:s,attrs:{dtype:"complex64"}}),r=s.data.get(t.dataId),n=r.complexTensorInfos.real,l=r.complexTensorInfos.imag,u=s.data.get(n.dataId).values,p=s.data.get(l.dataId).values,m=Cast_cast({inputs:{x:o},backend:s,attrs:{dtype:"complex64"}}),y=s.data.get(m.dataId),_=y.complexTensorInfos.real,w=y.complexTensorInfos.imag,I=s.data.get(_.dataId).values,C=s.data.get(w.dataId).values,[E,A,$]=a(i.shape,o.shape,u,p,I,C),F=s.makeTensorInfo($,"float32",E),D=s.makeTensorInfo($,"float32",A),P=Complex_complex({inputs:{real:F,imag:D},backend:s});return s.disposeIntermediateTensorInfo(t),s.disposeIntermediateTensorInfo(m),s.disposeIntermediateTensorInfo(F),s.disposeIntermediateTensorInfo(D),P}{let t=s.data.get(i.dataId).values,a=s.data.get(o.dataId).values,l=n||i.dtype,[u,p]=r(i.shape,o.shape,t,a,l);return s.makeTensorInfo(p,l,u)}}}function createComplexBinaryKernelImpl(t){return(r,a,n,s,i,o)=>{let 
l=assertAndGetBroadcastShape(r,a),u=sizeFromShape(l),p=l.length,m=computeStrides(l),y=getArrayFromDType("float32",u),_=getArrayFromDType("float32",u),w=getBroadcastDims(r,l),I=getBroadcastDims(a,l),C=mergeRealAndImagArrays(n,s),E=mergeRealAndImagArrays(i,o),A=r.length,$=computeStrides(r),F=a.length,D=computeStrides(a);if(w.length+I.length===0)for(let r=0;rn[t]=0);let s=locToIndex(n,A,$),i=a.slice(-F);I.forEach(t=>i[t]=0);let o=locToIndex(i,F,D),l=t(C[2*s],C[2*s+1],E[2*o],E[2*o+1]);y[r]=l.real,_[r]=l.imag}return[y,_,l]}}let lV=createSimpleBinaryKernelImpl((t,r)=>t+r),lB=binaryKernelFunc("Add",lV,createComplexBinaryKernelImpl((t,r,a,n)=>({real:t+a,imag:r+n})));function Reshape_reshape(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{shape:i}=n,o=sizeFromShape(s.shape),l=inferFromImplicitShape(i,o),u=sizeFromShape(l);assert(o===u,()=>`The new shape (${l}) has ${u} elements and the old shape (${s.shape}) has ${o} elements. The new shape and old shape must have the same number of elements.`),a.incRef(s.dataId);let p=a.data.get(s.dataId);if(null!=p.complexTensorInfos){let t=p.complexTensorInfos.real,r=p.complexTensorInfos.imag;t.shape=l,r.shape=l}return{dataId:s.dataId,shape:l,dtype:s.dtype}}function batchMatMul(t){let{inputs:r,backend:a,attrs:n}=t,{a:s,b:i}=r,{transposeA:o,transposeB:l}=n;assertNotComplex([s,i],"matMul");let u=s.shape.length,p=i.shape.length,m=o?s.shape[u-2]:s.shape[u-1],y=l?i.shape[p-1]:i.shape[p-2],_=o?s.shape[u-1]:s.shape[u-2],w=l?i.shape[p-2]:i.shape[p-1],I=s.shape.slice(0,-2),C=i.shape.slice(0,-2),E=sizeFromShape(I),A=sizeFromShape(C),$=assertAndGetBroadcastShape(s.shape.slice(0,-2),i.shape.slice(0,-2)).concat([_,w]);assert(m===y,()=>`Error in matMul: inner shapes (${m}) and (${y}) of Tensors with shapes ${s.shape} and ${i.shape} and transposeA=${o} and transposeB=${l} must match.`);let 
F=Reshape_reshape({inputs:{x:s},backend:a,attrs:{shape:o?[E,m,_]:[E,_,m]}}),D=Reshape_reshape({inputs:{x:i},backend:a,attrs:{shape:l?[A,w,y]:[A,y,w]}}),P=o?F.shape[1]:F.shape[2],L=o?F.shape[2]:F.shape[1],z=l?D.shape[1]:D.shape[2],B=Math.max(E,A),G=a.data.get(F.dataId).values,j=a.data.get(D.dataId).values,K=computeStrides(F.shape),H=computeStrides(D.shape),[q,Z,Q]=o?[K[0],1,K[1]]:[K[0],K[1],1],[ee,et,er]=l?[1,H[1],H[0]]:[H[1],1,H[0]],en=L*z,es=buffer([B,L,z],F.dtype),ei=es.values,eo=a.blockSize;for(let t=0;tMath.acos(t)),lU=unaryKernelFunc(eW,t=>Math.acosh(t));function AddN_addN(t){let{inputs:r,backend:a}=t;assertNotComplex(r,"addN");let n=r.map(t=>a.data.get(t.dataId).values),s=buffer(r[0].shape,r[0].dtype),i=s.values;for(let t=0;ta&&(a=s,n=t)}_[t]=n}return p.forEach(t=>a.disposeIntermediateTensorInfo(t)),a.makeTensorInfo(m,"int32",_)}function ArgMin_argMin(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{axis:i}=n;assertNotComplex(s,"argMin");let o=parseAxisParam(i,s.shape),l=getAxesPermutation(o,s.shape.length),u=s,p=[];null!=l&&(p.push(u=Transpose_transpose({inputs:{x:s},backend:a,attrs:{perm:l}})),o=getInnerMostAxes(o.length,u.shape.length)),assertAxesAreInnerMostDims("argMin",o=[o[0]],u.shape.length);let[m,y]=computeOutAndReduceShapes(u.shape,o),_=makeZerosTypedArray(sizeFromShape(m),"int32"),w=sizeFromShape(y),I=a.data.get(u.dataId).values;for(let t=0;t<_.length;++t){let r=t*w,a=I[r],n=0;for(let t=0;ta.disposeIntermediateTensorInfo(t)),a.makeTensorInfo(m,"int32",_)}let lG=unaryKernelFunc(eK,t=>Math.asin(t)),lj=unaryKernelFunc(eH,t=>Math.asinh(t)),lK=unaryKernelFunc(eq,t=>Math.atan(t)),lH=binaryKernelFunc(eY,createSimpleBinaryKernelImpl((t,r)=>Math.atan2(t,r))),lq=unaryKernelFunc(eX,t=>Math.atanh(t));function pool_utils_pool(t,r,a,n,s,i){let 
o=s.strideHeight,l=s.strideWidth,u=s.dilationHeight,p=s.dilationWidth,m=s.effectiveFilterHeight,y=s.effectiveFilterWidth,_=s.padInfo.top,w=s.padInfo.left,I="max"===i?-1/0:1/0,C=buffer(s.outShape,a),E=C.values,A=s.outShape[1]*s.outShape[2]*s.outShape[3],$=s.outShape[2]*s.outShape[3],F=s.outShape[3];for(let r=0;rA?A=o:"avg"===i&&($+=o,D++)}if(isNaN(A))break}E[z+a*F+r]="avg"===i?$/D:A}}}return C}function maxPoolPositions(t,r,a,n,s=!1,i=!1){let o=buffer(n.outShape,"int32"),l=n.strideHeight,u=n.strideWidth,p=n.dilationHeight,m=n.dilationWidth,y=n.effectiveFilterHeight,_=n.effectiveFilterWidth,w=n.padInfo.top,I=n.padInfo.left,C=buffer(r,a,t);for(let t=0;tD&&(D=p,P=s?i?((t*n.inHeight+a)*n.inWidth+l)*n.inChannels+r:(a*n.inWidth+l)*n.inChannels+r:o*_+u)}}o.set(P,t,a,l,r)}}return o}function pool_utils_pool3d(t,r,a,n,s,i){let o=s.strideDepth,l=s.strideHeight,u=s.strideWidth,p=s.dilationDepth,m=s.dilationHeight,y=s.dilationWidth,_=s.effectiveFilterDepth,w=s.effectiveFilterHeight,I=s.effectiveFilterWidth,C=s.padInfo.front,E=s.padInfo.top,A=s.padInfo.left,$="max"===i?-1/0:1/0,F=buffer(s.outShape,a),D=F.values,P=s.outShape[1]*s.outShape[2]*s.outShape[3]*s.outShape[4],L=s.outShape[2]*s.outShape[3]*s.outShape[4],z=s.outShape[3]*s.outShape[4],B=s.outShape[4];for(let r=0;rL?L=s:"avg"===i&&(z+=s,G++),isNaN(L))break}if(isNaN(L))break}if(isNaN(L))break}D[E+r]="avg"===i?z/Math.max(G,1):L}}}}return F}function maxPool3dPositions(t,r){let a=buffer(r.outShape,"int32"),n=r.strideDepth,s=r.strideHeight,i=r.strideWidth,o=r.dilationDepth,l=r.dilationHeight,u=r.dilationWidth,p=r.effectiveFilterDepth,m=r.effectiveFilterHeight,y=r.effectiveFilterWidth,_=r.padInfo.front,w=r.padInfo.top,I=r.padInfo.left;for(let C=0;C=B&&(B=l,G=a*m*y+s*m+o)}}}a.set(G,C,A,n,s,E)}}}return a}function AvgPool_avgPool(t){let r,{inputs:a,backend:n,attrs:s}=t,{x:i}=a;assertNotComplex(i,"avgPool");let{filterSize:o,strides:l,pad:u,dimRoundingMode:p}=s;assert(eitherStridesOrDilationsAreOne(l,1),()=>`Error in avgPool: Either 
strides or dilations must be 1. Got strides ${l} and dilations '1'`);let m=computePool2DInfo(i.shape,o,l,1,u,p);if(1===m.filterWidth&&1===m.filterHeight&&arraysEqual(m.inShape,m.outShape))r=Identity_identity({inputs:{x:i},backend:n});else{let t=n.data.get(i.dataId).values,a=computeStrides(i.shape),s=pool_utils_pool(t,i.shape,i.dtype,a,m,"avg");r=n.makeTensorInfo(m.outShape,i.dtype,s.values)}return r}function avgPool3D(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{filterSize:i,strides:o,pad:l,dimRoundingMode:u,dataFormat:p}=n;assertNotComplex(s,"avgPool3d");let m=computePool3DInfo(s.shape,i,o,1,l,u,p),y=pool_utils_pool3d(a.data.get(s.dataId).values,s.shape,s.dtype,computeStrides(s.shape),m,"avg");return a.makeTensorInfo(y.shape,"float32",y.values)}function avgPool3DGrad(t){let{inputs:r,backend:a,attrs:n}=t,{dy:s,input:i}=r,{filterSize:o,strides:l,pad:u,dimRoundingMode:p}=n;assertNotComplex([s,i],"avgPool3DGrad");let m=computePool3DInfo(i.shape,o,l,1,u,p),y=m.strideDepth,_=m.strideHeight,w=m.strideWidth,I=m.filterDepth,C=m.filterHeight,E=m.filterWidth,A=m.dilationDepth,$=m.dilationHeight,F=m.dilationWidth,D=m.effectiveFilterDepth,P=m.effectiveFilterHeight,L=m.effectiveFilterWidth,z=D-1-m.padInfo.front,B=L-1-m.padInfo.left,G=P-1-m.padInfo.top,j=buffer(i.shape,"float32"),K=1/(I*C*E),H=a.bufferSync(s);for(let t=0;t=m.outDepth)&&Math.floor(n)===n)for(let a=0;a=m.outHeight)&&Math.floor(s)===s)for(let a=0;a=m.outWidth||Math.floor(i)!==i||(u+=H.get(t,n,s,i,r))}}}j.set(u*K,t,a,n,s,r)}return a.makeTensorInfo(j.shape,j.dtype,j.values)}function 
AvgPoolGrad_avgPoolGrad(t){let{inputs:r,backend:a,attrs:n}=t,{dy:s,input:i}=r;assertNotComplex([s,i],"avgPoolGrad");let{filterSize:o,strides:l,pad:u}=n,p=computePool2DInfo(i.shape,o,l,1,u),m=p.strideHeight,y=p.strideWidth,_=p.filterHeight,w=p.filterWidth,I=p.dilationHeight,C=p.dilationWidth,E=p.effectiveFilterHeight,A=p.effectiveFilterWidth,$=A-1-p.padInfo.left,F=E-1-p.padInfo.top,D=buffer(i.shape,"float32"),P=1/(_*w),L=a.data.get(s.dataId).values,z=buffer(s.shape,"float32",L);for(let t=0;t=p.outHeight)&&Math.floor(n)===n)for(let a=0;a=p.outWidth||Math.floor(s)!==s||(o+=z.get(t,n,s,r))}}D.set(o*P,t,a,n,r)}return a.makeTensorInfo(D.shape,D.dtype,D.values)}function BatchNorm_batchNorm(t){let{inputs:r,backend:a,attrs:n}=t,{x:s,scale:i,offset:o,mean:l,variance:u}=r;assert(l.shape.length===u.shape.length,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),assert(null==o||l.shape.length===o.shape.length,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),assert(null==i||l.shape.length===i.shape.length,()=>"Batch normalization gradient requires mean and scale to have equal ranks."),assertNotComplex([s,l,u,i,o],"batchNorm");let{varianceEpsilon:p}=n;null==p&&(p=.001);let m=a.data.get(s.dataId).values,y=a.data.get(l.dataId).values,_=a.data.get(u.dataId).values,w=i?a.data.get(i.dataId).values:new Float32Array([1]),I=o?a.data.get(o.dataId).values:new Float32Array([0]),C=new Float32Array(m.length),E=I.length,A=w.length,$=_.length,F=y.length,D=0,P=0,L=0,z=0;for(let t=0;t=E&&(D=0),P>=F&&(P=0),L>=A&&(L=0),z>=$&&(z=0);return a.makeTensorInfo(s.shape,s.dtype,C)}function sliceImpl(t,r,a,n,s){let i=isSliceContinous(n,r,a),o=sizeFromShape(a),l=computeStrides(n);if(i){let a=computeFlatOffset(r,l);return"string"===s?t.slice(a,a+o):t.subarray(a,a+o)}let u="string"===s?fromUint8ToStringArray(t):t,p=buffer(n,s,u),m=buffer(a,s);for(let 
t=0;tt+r[a]);m.set(p.get(...n),...a)}return"string"===s?fromStringArrayToUint8(m.values):m.values}function Slice_slice(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{begin:i,size:o}=n;assertNotComplex(s,"slice");let[l,u]=parseSliceParams(s,i,o);assertParamsValid(s,l,u);let p=sliceImpl(a.data.get(s.dataId).values,l,u,s.shape,s.dtype);return a.makeTensorInfo(u,s.dtype,p)}function BatchToSpaceND_batchToSpaceND(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{blockShape:i,crops:o}=n;assertNotComplex([s],"batchToSpaceND");let l=i.reduce((t,r)=>t*r),u=getReshaped(s.shape,i,l),p=getPermuted(u.length,i.length),m=getReshapedPermuted(s.shape,i,l),y=getSliceBeginCoords(o,i.length),_=getSliceSize(m,o,i.length),w=Reshape_reshape({inputs:{x:s},backend:a,attrs:{shape:u}}),I=Transpose_transpose({inputs:{x:w},backend:a,attrs:{perm:p}}),C=Reshape_reshape({inputs:{x:I},backend:a,attrs:{shape:m}}),E=Slice_slice({inputs:{x:C},backend:a,attrs:{begin:y,size:_}});return a.disposeIntermediateTensorInfo(w),a.disposeIntermediateTensorInfo(I),a.disposeIntermediateTensorInfo(C),E}function bincountImpl(t,r,a,n,s){let i=sizeFromShape(n),o=makeZerosTypedArray(s,a);for(let a=0;a=s||(i>0?o[n]+=r[a]:o[n]+=1)}return o}function bincountReduceImpl(t,r,a,n=!1){let s=t.shape[0],i=t.shape[1],o=buffer([s,a],r.dtype);for(let l=0;l=a||(n?o.set(1,l,i):r.size>0?o.set(o.get(l,i)+r.get(l,s),l,i):o.set(o.get(l,i)+1,l,i))}return o}function Bincount_bincount(t){let{inputs:r,backend:a,attrs:n}=t,{x:s,weights:i}=r,{size:o}=n,l=bincountImpl(a.data.get(s.dataId).values,a.data.get(i.dataId).values,i.dtype,i.shape,o);return a.makeTensorInfo([o],i.dtype,l)}let lX=createSimpleBinaryKernelImpl((t,r)=>t&r),lY=binaryKernelFunc(e4,lX);function BroadcastArgs_broadcastArgs(t){let{inputs:r,backend:a}=t,{s0:n,s1:s}=r,i=a.data.get(n.dataId).values,o=a.data.get(s.dataId).values,l=assertAndGetBroadcastShape(Array.from(i),Array.from(o));return a.makeTensorInfo([l.length],"int32",Int32Array.from(l))}let 
lZ=createSimpleUnaryImpl(t=>Math.ceil(t)),lJ=unaryKernelFuncFromImpl(e8,lZ),lQ=unaryKernelFunc(e7,(t,r)=>t>r.clipValueMax?r.clipValueMax:t{let a=sizeFromShape(t.shape);s.set(t.vals,r),r+=a})}else{let n=0;t.forEach(t=>{let i="string"===a?fromUint8ToStringArray(t.vals):t.vals,o=0;for(let a=0;at.shape),i);let o=concat_util_computeOutShape(r.map(t=>t.shape),i);if(0===sizeFromShape(o))return a.makeTensorInfo(o,r[0].dtype,[]);let l=r.filter(t=>sizeFromShape(t.shape)>0);if(1===l.length)return Identity_identity({inputs:{x:l[0]},backend:a});if("complex64"===l[0].dtype){let t=l.map(t=>Real_real({inputs:{input:t},backend:a})),r=l.map(t=>Imag_imag({inputs:{input:t},backend:a})),n=Concat_concat({inputs:t,backend:a,attrs:{axis:i}}),s=Concat_concat({inputs:r,backend:a,attrs:{axis:i}}),o=Complex_complex({inputs:{real:n,imag:s},backend:a});return t.forEach(t=>a.disposeIntermediateTensorInfo(t)),r.forEach(t=>a.disposeIntermediateTensorInfo(t)),a.disposeIntermediateTensorInfo(n),a.disposeIntermediateTensorInfo(s),o}let u=l.map(t=>{let r=sizeFromShape(t.shape.slice(i));return Reshape_reshape({inputs:{x:t},backend:a,attrs:{shape:[-1,r]}})}),p=u.map(t=>({vals:a.data.get(t.dataId).values,shape:t.shape}));o=concat_util_computeOutShape(u.map(t=>t.shape),1);let m=1===u[0].shape[0],y=concatImpl(p,o,r[0].dtype,m),_=concat_util_computeOutShape(l.map(t=>t.shape),i),w=a.makeTensorInfo(_,r[0].dtype,y);return u.forEach(t=>a.disposeIntermediateTensorInfo(t)),w}function conv2D(t){let{inputs:r,backend:a,attrs:n}=t,{x:s,filter:i}=r,{strides:o,pad:l,dataFormat:u,dilations:p,dimRoundingMode:m}=n;assertNotComplex([s,i],"conv2d");let y=convertConv2DDataFormat(u),_=computeConv2DInfo(s.shape,i.shape,o,p,l,m,!1,y),w=_.filterHeight,I=_.filterWidth,C=_.dilationHeight,E=_.dilationWidth,A=_.padInfo.left,$=_.padInfo.top,F="channelsLast"===_.dataFormat,D=new 
TensorBuffer(_.outShape,s.dtype),P=computeStrides(s.shape),L=computeStrides(i.shape),z=P[0],B=F?P[1]:P[2],G=F?P[2]:1,j=F?1:P[1],K=D.strides[0],H=F?D.strides[1]:D.strides[2],q=F?D.strides[2]:1,Z=F?1:D.strides[1],Q=a.data.get(s.dataId).values,ee=a.data.get(i.dataId).values,et=D.values;for(let t=0;t<_.batchSize;++t){let r=t*z,a=t*K;for(let t=0;t<_.outHeight;++t){let n=a+t*H,s=t*_.strideHeight-$;for(let t=0;t=_.inHeight)continue;let i=t*L[0],o=r+a*B;for(let t=0;t<_.outWidth;++t){let r=n+t*q,a=t*_.strideWidth-A;for(let t=0;t=_.inWidth)continue;let s=i+t*L[1],l=o+n*G,u=s;for(let t=0;t<_.inChannels;++t){let a=Q[l+t*j];for(let t=0;t<_.outChannels;++t)et[r+t*Z]+=a*ee[u+t];u+=_.outChannels}}}}}}return a.makeTensorInfo(D.shape,D.dtype,et)}function Conv2DBackpropFilter_conv2DBackpropFilter(t){let{inputs:r,backend:a,attrs:n}=t,{x:s,dy:i}=r,{strides:o,pad:l,dataFormat:u,dimRoundingMode:p,filterShape:m}=n;assertNotComplex([s,i],"conv2dBackpropFilter");let y=convertConv2DDataFormat(u),_=computeConv2DInfo(s.shape,m,o,1,l,p,!1,y),{strideHeight:w,strideWidth:I,filterHeight:C,filterWidth:E}=_,A="channelsLast"===_.dataFormat,$=new TensorBuffer(_.filterShape,"float32"),F=_.padInfo.left,D=_.padInfo.top,P=a.data.get(s.dataId).values,L=a.data.get(i.dataId).values,z=new TensorBuffer(s.shape,s.dtype,P),B=new TensorBuffer(i.shape,i.dtype,L);for(let t=0;t=p.inDepth)continue;let i=t*G[0],o=r+a*B[1];for(let t=0;t=p.inHeight)continue;let s=i+t*G[1],l=o+n*B[2];for(let t=0;t=p.inWidth)continue;let i=s+t*G[2],o=l+r*p.inChannels,u=i;for(let t=0;tMath.cos(t)),l1=unaryKernelFunc(tu,t=>Math.cosh(t));function CropAndResize_cropAndResize(t){let{inputs:r,backend:a,attrs:n}=t,{image:s,boxes:i,boxInd:o}=r,{cropSize:l,method:u,extrapolationValue:p}=n,[m,y,_,w]=s.shape,I=i.shape[0],[C,E]=l,A=buffer([I,C,E,w],"float32"),$=a.data.get(i.dataId).values,F=a.data.get(o.dataId).values,D=a.data.get(s.dataId).values,P=computeStrides(s.shape),L=computeStrides(A.shape);for(let t=0;t=m)continue;let 
l=C>1?(s-a)*(y-1)/(C-1):0,I=E>1?(i-n)*(_-1)/(E-1):0;for(let r=0;r1?a*(y-1)+r*l:.5*(a+s)*(y-1);if(m<0||m>y-1){for(let a=0;a1?n*(_-1)+u*I:.5*(n+i)*(_-1);if(m<0||m>_-1){for(let a=0;a1?n*(_-1)+a*I:.5*(n+i)*(_-1);if(s<0||s>_-1){for(let n=0;nt+I-r-1:(t,r)=>t+r;for(let t=0;tt+I-r-1:(t,r)=>t+r;for(let t=0;t`Only NHWC dataFormat supported on CPU for depthToSpace. Got ${o}`);let l=s.shape[0],u=s.shape[1],p=s.shape[2],m=s.shape[3],y=u*i,_=p*i,w=m/(i*i),I=a.data.get(s.dataId).values,C=new Float32Array(l*y*_*w),E=0;for(let t=0;t`Error in depthwiseConv2d: Either strides or dilations must be 1. Got strides ${o} and dilations '${_}'`);let w=computeConv2DInfo(s.shape,i.shape,o,_,l,p,!0),{filterHeight:I,filterWidth:C,dilationHeight:E,dilationWidth:A,padInfo:$}=w,F=$.left,D=$.top,P=w.outChannels/w.inChannels,L=new TensorBuffer(w.outShape,s.dtype),z=a.data.get(s.dataId).values,B=a.data.get(i.dataId).values,G=L.values;for(let t=0;t=w.inHeight)continue;let i=t*y[0],o=r+a*m[1];for(let t=0;t=w.inWidth)continue;let s=i+t*y[1],l=o+n*w.inChannels,u=r,p=s;for(let t=0;t1)throw Error(`Tensor values for a float32 Tensor must be in the range [0 - 1] but encountered ${n}.`)}else if("int32"===s.dtype&&(n<0||n>255))throw Error(`Tensor values for a int32 Tensor must be in the range [0 - 255] but encountered ${n}.`);1===I?(r[0]=n*E,r[1]=n*E,r[2]=n*E):r[a]=n*E}let a=4*t;A[a+0]=Math.round(r[0]),A[a+1]=Math.round(r[1]),A[a+2]=Math.round(r[2]),A[a+3]=Math.round(r[3])}i.width=w,i.height=_;let $=new ImageData(A,w,_);return y.putImageData($,0,0),s}let l2=createSimpleBinaryKernelImpl((t,r)=>t*r),l3=binaryKernelFunc(re,l2,createComplexBinaryKernelImpl((t,r,a,n)=>({real:t*a-r*n,imag:t*n+r*a})));function Sum_sum(t){let r,{inputs:a,backend:n,attrs:s}=t,{x:i}=a,{axis:o,keepDims:l}=s;assertNotComplex(i,"sum");let 
u=(r="bool"===i.dtype?Cast_cast({inputs:{x:i},backend:n,attrs:{dtype:"int32"}}):Identity_identity({inputs:{x:i},backend:n})).shape.length,p=parseAxisParam(o,r.shape),m=getAxesPermutation(p,u),y=p,_=r;null!=m&&(_=Transpose_transpose({inputs:{x:r},backend:n,attrs:{perm:m}}),y=getInnerMostAxes(y.length,u)),assertAxesAreInnerMostDims("sum",y,_.shape.length);let[w,I]=computeOutAndReduceShapes(_.shape,y),C=zeros_impl_zeros(n,w,upcastType(_.dtype,"int32")),E=sizeFromShape(I),A=n.data.get(C.dataId).values,$=n.data.get(_.dataId).values;for(let t=0;t=0&&(y=Sum_sum({inputs:{x:y},backend:a,attrs:{axis:u[t]-(i.length-_),keepDims:!1}}),w.push(y)),_--)}for(let t of w)t!==y&&a.disposeIntermediateTensorInfo(t);return y}function eluGrad(t){let{inputs:r,backend:a}=t,{dy:n,y:s}=r;assertNotComplex([n,s],"eluGrad");let i=new Float32Array(sizeFromShape(s.shape)),o=a.data.get(s.dataId).values,l=a.data.get(n.dataId).values;for(let t=0;t=0?i[t]=l[t]:i[t]=l[t]*(r+1)}return a.makeTensorInfo(s.shape,"float32",i)}let l4=createSimpleBinaryKernelImpl((t,r)=>+(t===r)),l6=binaryKernelFunc(tN,l4,null,"bool"),l5=unaryKernelFunc("Erf",t=>{let r=Math.sign(t),a=Math.abs(t),n=1/(1+oS*a);return r*(1-((((oE*n+oC)*n+oN)*n+oI)*n+ow)*n*Math.exp(-a*a))}),l8=createSimpleUnaryImpl(t=>Math.exp(t)),l7=unaryKernelFuncFromImpl("Exp",l8,"float32");function ExpandDims_expandDims(t){let{inputs:r,backend:a,attrs:n}=t,{input:s}=r,{dim:i}=n,o=s.shape.length,l=s.shape.slice(),u=i;return i<0&&(assert(-(o+1)<=i,()=>`Axis must be in the interval [${-(o+1)}, ${o}]`),u=o+i+1),l.splice(u,0,1),Reshape_reshape({inputs:{x:s},backend:a,attrs:{shape:l}})}let l9=createSimpleUnaryImpl(t=>Math.expm1(t)),ue=unaryKernelFuncFromImpl(tE,l9),ut=binaryKernelFunc(tS,createSimpleBinaryKernelImpl((t,r)=>t/r)),ur={kernelName:tS,backendName:"cpu",kernelFunc:ut},un=createSimpleBinaryKernelImpl((t,r)=>t-r),us=binaryKernelFunc("Sub",un,createComplexBinaryKernelImpl((t,r,a,n)=>({real:t-a,imag:r-n})));function fftBatch(t,r,a){let 
n=t.shape,s=n[0],i=n[1],o=a.data.get(t.dataId),l=o.complexTensorInfos.real,u=o.complexTensorInfos.imag,p=[s,i],m=sizeFromShape(p),y=getArrayFromDType("float32",m),_=getArrayFromDType("float32",m);for(let t=0;tMath.floor(t)),uo=unaryKernelFuncFromImpl(tR,ui),ul=binaryKernelFunc(tF,createSimpleBinaryKernelImpl((t,r)=>Math.floor(t/r)),null,"int32");function fusedConv2D(t){let{inputs:r,backend:a,attrs:n}=t,{x:s,filter:i,bias:o,preluActivationWeights:l}=r,{strides:u,pad:p,dataFormat:m,dilations:y,dimRoundingMode:_,activation:w,leakyreluAlpha:I}=n,C=conv2D({inputs:{x:s,filter:i},backend:a,attrs:{strides:u,pad:p,dataFormat:m,dilations:y,dimRoundingMode:_}});if(o){let t=C;if("NCHW"===m&&1===o.shape.length&&1!==o.shape[0]){let t=Reshape_reshape({inputs:{x:o},backend:a,attrs:{shape:[o.shape[0],1,1]}});C=lB({inputs:{a:C,b:t},backend:a}),a.disposeIntermediateTensorInfo(t)}else C=lB({inputs:{a:C,b:o},backend:a});a.disposeIntermediateTensorInfo(t)}if(w){let t=C;if("NCHW"===m&&"prelu"===w&&1===l.shape.length&&1!==l.shape[0]){let t=Reshape_reshape({inputs:{x:l},backend:a,attrs:{shape:[l.shape[0],1,1]}});C=fused_utils_applyActivation(a,C,w,t,I),a.disposeIntermediateTensorInfo(t)}else C=fused_utils_applyActivation(a,C,w,l,I);a.disposeIntermediateTensorInfo(t)}return C}function fusedDepthwiseConv2D(t){let{inputs:r,backend:a,attrs:n}=t,{x:s,filter:i,bias:o,preluActivationWeights:l}=r,{strides:u,pad:p,dataFormat:m,dilations:y,dimRoundingMode:_,activation:w,leakyreluAlpha:I}=n,C=depthwiseConv2dNative({inputs:{x:s,filter:i},backend:a,attrs:{strides:u,pad:p,dataFormat:m,dilations:y,dimRoundingMode:_}});if(o){let t=C;C=lB({inputs:{a:C,b:o},backend:a}),a.disposeIntermediateTensorInfo(t)}if(w){let t=C;C=fused_utils_applyActivation(a,C,w,l,I),a.disposeIntermediateTensorInfo(t)}return C}function gatherNdImpl(t,r,a,n,s,i,o,l,u){let p=buffer([n,i],a);for(let a=0;a=u/i)throw Error(`Invalid indices: ${n} does not index into ${l}`);for(let t=0;t=0,()=>`GatherV2: the index value ${r} is not in [0, 
${m-1}]`)}let y=l;null==l&&(y=0);let _=sizeFromShape(i.shape),w=collectGatherOpShapeInfo(s,i,u,y),I=Reshape_reshape({inputs:{x:s},backend:a,attrs:{shape:[w.batchSize,w.outerSize,w.dimSize,w.sliceSize]}}),C=Reshape_reshape({inputs:{x:i},backend:a,attrs:{shape:[w.batchSize,_/w.batchSize]}}),E=[w.batchSize,w.outerSize,_/w.batchSize,w.sliceSize],A=a.bufferSync(C),$=gatherV2Impl(a.bufferSync(I),A,E);return a.disposeIntermediateTensorInfo(I),a.disposeIntermediateTensorInfo(C),a.makeTensorInfo(w.outputShape,$.dtype,$.values)}let uu=createSimpleBinaryKernelImpl((t,r)=>+(t>r)),up=binaryKernelFunc(tM,uu,null,"bool"),uh=createSimpleBinaryKernelImpl((t,r)=>+(t>=r)),uc=binaryKernelFunc(tL,uh,null,"bool");function IFFT_ifft(t){let{inputs:r,backend:a}=t,{input:n}=r,s=sizeFromShape(n.shape),i=n.shape[n.shape.length-1],o=Reshape_reshape({inputs:{x:n},backend:a,attrs:{shape:[s/i,i]}}),l=fftBatch(o,!0,a),u=Reshape_reshape({inputs:{x:l},backend:a,attrs:{shape:n.shape}});return a.disposeIntermediateTensorInfo(o),a.disposeIntermediateTensorInfo(l),u}let ud=unaryKernelFunc(tW,t=>+!!Number.isFinite(t),"bool"),um=unaryKernelFunc(tU,t=>+(Math.abs(t)===1/0),"bool"),uf=unaryKernelFunc(tG,t=>+!!Number.isNaN(t),"bool"),ug=createSimpleBinaryKernelImpl((t,r)=>+(t+(t<=r)),uv=binaryKernelFunc(tH,ux,null,"bool");function linSpaceImpl(t,r,a){let n=(r-t)/(a-1),s=makeZerosTypedArray(a,"float32");s[0]=t;for(let t=1;tMath.log(t)),uT=unaryKernelFuncFromImpl("Log",u_),uk=unaryKernelFunc(tX,t=>Math.log1p(t)),uS=binaryKernelFunc(tY,createSimpleBinaryKernelImpl((t,r)=>t&&r),null,"bool"),uw=unaryKernelFunc(tZ,t=>+!t,"bool"),uI=binaryKernelFunc(tJ,createSimpleBinaryKernelImpl((t,r)=>t||r),null,"bool");function lRN(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{depthRadius:i,bias:o,alpha:l,beta:u}=n;assertNotComplex(s,"LRN");let p=s.shape[3],m=p-1,y=a.data.get(s.dataId).values,_=sizeFromShape(s.shape),w=new Float32Array(_);function sumAcrossChannels(t){let 
r=t%p,a=t-r+Math.max(0,r-i),n=t-r+Math.min(r+i,m),s=0;for(;a<=n;a++){let t=y[a];s+=t*t}return s}for(let t=0;t<_;t++){let r=sumAcrossChannels(t),a=y[t]*Math.pow(o+l*r,-u);w[t]=a}return a.makeTensorInfo(s.shape,s.dtype,w)}function lRNGrad(t){let{inputs:r,backend:a,attrs:n}=t,{x:s,y:i,dy:o}=r,{depthRadius:l,bias:u,alpha:p,beta:m}=n;assertNotComplex(o,"LRNGrad");let y=sizeFromShape(o.shape),_=o.shape[3],w=a.data.get(o.dataId).values,I=a.data.get(s.dataId).values,C=a.data.get(i.dataId).values,E=new Float32Array(y);for(let t=0;ti)&&(i=r)}s[a]=i}return s}function Max_max(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{reductionIndices:i,keepDims:o}=n,l=s.shape,u=l.length,p=parseAxisParam(i,l),m=p,y=getAxesPermutation(m,u),_=a.data.get(s.dataId).values;if(null!=y){let t=Array(u);for(let r=0;rMath.max(t,r)),uC=binaryKernelFunc(t0,uN);function MaxPool_maxPool(t){let r,{inputs:a,backend:n,attrs:s}=t,{x:i}=a;assertNotComplex(i,"maxPool");let{filterSize:o,strides:l,pad:u,dimRoundingMode:p}=s;assert(eitherStridesOrDilationsAreOne(l,1),()=>`Error in maxPool: Either strides or dilations must be 1. 
Got strides ${l} and dilations '1'`);let m=computePool2DInfo(i.shape,o,l,1,u,p);if(1===m.filterWidth&&1===m.filterHeight&&arraysEqual(m.inShape,m.outShape))r=Identity_identity({inputs:{x:i},backend:n});else{let t=n.data.get(i.dataId).values,a=computeStrides(i.shape),s=pool_utils_pool(t,i.shape,i.dtype,a,m,"max");r=n.makeTensorInfo(m.outShape,i.dtype,s.values)}return r}function maxPool3D(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{filterSize:i,strides:o,pad:l,dimRoundingMode:u,dataFormat:p}=n;assertNotComplex(s,"maxPool3d");let m=computePool3DInfo(s.shape,i,o,1,l,u,p),y=pool_utils_pool3d(a.data.get(s.dataId).values,s.shape,s.dtype,computeStrides(s.shape),m,"max");return a.makeTensorInfo(y.shape,"float32",y.values)}function maxPool3DGrad(t){let{inputs:r,backend:a,attrs:n}=t,{dy:s,input:i}=r,{filterSize:o,strides:l,pad:u,dimRoundingMode:p}=n;assertNotComplex([s,i],"maxPool3DGrad");let m=computePool3DInfo(i.shape,o,l,1,u,p),y=maxPool3dPositions(a.bufferSync(i),m),_=m.strideDepth,w=m.strideHeight,I=m.strideWidth,C=m.dilationDepth,E=m.dilationHeight,A=m.dilationWidth,$=m.effectiveFilterDepth,F=m.effectiveFilterHeight,D=m.effectiveFilterWidth,P=$-1-m.padInfo.front,L=D-1-m.padInfo.left,z=F-1-m.padInfo.top,B=buffer(i.shape,"float32"),G=a.bufferSync(s);for(let t=0;t=m.outDepth)&&Math.floor(n)===n)for(let s=0;s=m.outHeight)&&Math.floor(i)===i)for(let o=0;o=m.outWidth||Math.floor(p)!==p)continue;let _=+($*F*D-1-y.get(t,n,i,p,r)===a*F*D+s*D+o);0!==_&&(u+=G.get(t,n,i,p,r)*_)}}}B.set(u,t,a,n,s,r)}return a.makeTensorInfo(B.shape,B.dtype,B.values)}function 
MaxPoolGrad_maxPoolGrad(t){let{inputs:r,backend:a,attrs:n}=t,{dy:s,input:i,output:o}=r;assertNotComplex([i,o],"maxPoolGrad");let{filterSize:l,strides:u,pad:p,dimRoundingMode:m}=n,y=computePool2DInfo(i.shape,l,u,1,p,m),_=a.data.get(i.dataId).values,w=buffer(y.outShape,i.dtype,maxPoolPositions(_,i.shape,i.dtype,y).values),I=y.strideHeight,C=y.strideWidth,E=y.dilationHeight,A=y.dilationWidth,$=y.effectiveFilterHeight,F=y.effectiveFilterWidth,D=F-1-y.padInfo.left,P=$-1-y.padInfo.top,L=buffer(i.shape,"float32"),z=a.data.get(s.dataId).values,B=buffer(s.shape,"float32",z);for(let t=0;t=y.outHeight)&&Math.floor(n)===n)for(let s=0;s=y.outWidth||Math.floor(l)!==l)continue;let u=+($*F-1-w.get(t,n,l,r)===a*F+s);0!==u&&(o+=B.get(t,n,l,r)*u)}}L.set(o,t,a,n,r)}return a.makeTensorInfo(L.shape,L.dtype,L.values)}function maxPoolWithArgmaxImpl(t,r,a,n,s){let i=computeStrides(r),o=pool_utils_pool(t,r,a,i,s,"max"),l=maxPoolPositions(t,r,a,s,!0,n);return[o.values,l.values]}function Mean_mean(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{axis:i,keepDims:o}=n,l=parseAxisParam(i,s.shape),u=sizeFromShape(computeOutAndReduceShapes(s.shape,l)[1]),p=[],m=a.makeTensorInfo([],"float32",new Float32Array([u]));p.push(m);let y=Cast_cast({inputs:{x:s},backend:a,attrs:{dtype:"float32"}});p.push(y);let _=ut({inputs:{a:y,b:m},backend:a});p.push(_);let w=Sum_sum({inputs:{x:_},backend:a,attrs:{axis:i,keepDims:o}});return p.forEach(t=>a.disposeIntermediateTensorInfo(t)),w}function Min_min(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{axis:i,keepDims:o}=n;assertNotComplex(s,"min");let l=parseAxisParam(i,s.shape),u=l,p=getAxesPermutation(u,s.shape.length),m=s;null!=p&&(m=Transpose_transpose({inputs:{x:s},backend:a,attrs:{perm:p}}),u=getInnerMostAxes(u.length,s.shape.length)),assertAxesAreInnerMostDims("min",u,m.shape.length);let[y,_]=computeOutAndReduceShapes(m.shape,u),w=sizeFromShape(_),I=makeZerosTypedArray(sizeFromShape(y),m.dtype),C=a.data.get(m.dataId).values;for(let 
t=0;tMath.min(t,r)),uA=binaryKernelFunc(t8,uE);function MirrorPad_mirrorPad(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{paddings:i,mode:o}=n;assertNotComplex(s,"mirrorPad");let l=i.map((t,r)=>t[0]+s.shape[r]+t[1]),u=i.map(t=>t[0]),p=i.map((t,r)=>t[0]+s.shape[r]),m=+("reflect"!==o),y=a.data.get(s.dataId).values,_=s.shape.length,w=computeStrides(s.shape),I=sizeFromShape(l),C=l.length,E=computeStrides(l),A=getArrayFromDType(s.dtype,I);for(let t=0;t=p[t]&&(r[t]=(p[t]-1)*2-r[t]+m);let a=locToIndex(r=r.map((t,r)=>t-u[r]),_,w);A[t]=y[a]}return{dataId:a.write(A,l,s.dtype),shape:l,dtype:s.dtype}}let u$=binaryKernelFunc("Mod",createSimpleBinaryKernelImpl((t,r)=>{let a=t%r;return t<0&&r<0||t>=0&&r>=0?a:(a+r)%r}));function Softmax_softmax(t){let{inputs:r,backend:a,attrs:n}=t,{logits:s}=r,{dim:i}=n,o=s.shape.length,l=i;if(-1===l&&(l=o-1),l!==o-1)throw Error(`Softmax along a non-last dimension is not yet supported. Logits was rank ${o} and dim was ${l}`);let u=parseAxisParam([l],s.shape),p=Max_max({inputs:{x:s},backend:a,attrs:{reductionIndices:u,keepDims:!1}}),m=expandShapeToKeepDim(p.shape,u),y=Reshape_reshape({inputs:{x:p},backend:a,attrs:{shape:m}}),_=us({inputs:{a:s,b:y},backend:a}),w=l7({inputs:{x:_},backend:a}),I=Sum_sum({inputs:{x:w},backend:a,attrs:{axis:u,keepDims:!1}}),C=Reshape_reshape({inputs:{x:I},backend:a,attrs:{shape:m}}),E=ut({inputs:{a:w,b:C},backend:a});return a.disposeIntermediateTensorInfo(p),a.disposeIntermediateTensorInfo(y),a.disposeIntermediateTensorInfo(_),a.disposeIntermediateTensorInfo(w),a.disposeIntermediateTensorInfo(I),a.disposeIntermediateTensorInfo(C),E}function Multinomial_multinomial(t){let{inputs:r,backend:a,attrs:n}=t,{logits:s}=r,{numSamples:i,seed:o,normalized:l}=n;assertNotComplex(s,"multinomial");let u=l?s:Softmax_softmax({inputs:{logits:s},backend:a,attrs:{dim:-1}}),p=u.shape[0],m=u.shape[1],y=a.data.get(u.dataId).values,_=[p,i],w=makeZerosTypedArray(sizeFromShape(_),"int32");for(let 
t=0;t+(t!==r)),uO=binaryKernelFunc(rt,uP,null,"bool");function OneHot_oneHot(t){let{inputs:r,backend:a,attrs:n}=t,{indices:s}=r,{dtype:i,depth:o,onValue:l,offValue:u}=n;assertNotComplex(s,"oneHot");let p=sizeFromShape(s.shape),m=new Float32Array(p*o);m.fill(u);let y=a.data.get(s.dataId).values;for(let t=0;t=0&&y[t]{assertShapesMatch(i,t.shape,"All tensors passed to stack must have matching shapes"),assert(o===t.dtype,()=>"All tensors passed to stack must have matching dtypes")});let l=[],u=Concat_concat({inputs:r.map(t=>{let r=ExpandDims_expandDims({inputs:{input:t},backend:a,attrs:{dim:s}});return l.push(r),r}),backend:a,attrs:{axis:s}});return l.forEach(t=>a.disposeIntermediateTensorInfo(t)),u}let uM={kernelName:ru,backendName:"cpu",kernelFunc:function(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{paddings:i,constantValue:o}=n;assertNotComplex(s,"pad");let l=i.map((t,r)=>t[0]+s.shape[r]+t[1]),u=i.map(t=>t[0]),p=a.data.get(s.dataId).values,m=sizeFromShape(s.shape),y=s.shape.length,_=computeStrides(s.shape),w=sizeFromShape(l),I=l.length,C=computeStrides(l),E=getArrayFromDType(s.dtype,w);0!==o&&E.fill(o);for(let t=0;tt+u[r]),I,C)]=p[t];return{dataId:a.write(E,l,s.dtype),shape:l,dtype:s.dtype}}},uL=binaryKernelFunc("Pow",createSimpleBinaryKernelImpl((t,r)=>Math.pow(t,r)));function prodImpl(t,r,a,n){let[s,i]=computeOutAndReduceShapes(t,n),o=upcastType(r,"int32"),l=makeZerosTypedArray(sizeFromShape(s),o),u=sizeFromShape(i);for(let t=0;ta.disposeIntermediateTensorInfo(t)),a.makeTensorInfo(A,E,I)}function validateIndices(t,r,a){t.forEach((t,n)=>{if(t<0||t>=a){let s=indexToLoc(n,r.length,computeStrides(r)).join(",");throw Error(`indices[${s}] = ${t} is not in [0, ${a})`)}})}function validateSplits(t,r){for(let a=0;as)throw Error("Ragged splits must not point past values");for(let t=1;tn[t])throw Error("Ragged splits must be sorted in ascending order")}}function makeSplits(t,r,a,n){let s=[],i=0,o=Array(r.length-1+a.length).fill(null).map(()=>[0]);validateSplits(a,n);let 
l=1;for(let t=0;t=0){let t=o[s],r=t[t.length-1]-n[l];for(let t=l;tn[r]=t)}return r}function computeFlatOuterDims(t,r){let a=t.slice(0,r);for(;a.lengtha.data.get(t.dataId).values),p=s.map(t=>t.shape),m=a.data.get(i.dataId).values,y=a.data.get(o.dataId).values,[_,w,I]=raggedGatherImpl(u,p,m,i.shape,i.dtype,y,o.shape,l),C=_.map(t=>a.makeTensorInfo([t.length],"int32",t)),E=a.makeTensorInfo(I,i.dtype,w);return C.concat([E])}function raggedRangeImpl(t,r,a,n,s,i,o){if(r.length>1)throw Error("starts must be a scalar or vector");if(s.length>1)throw Error("limits must be a scalar or vector");if(o.length>1)throw Error("deltas must be a scalar or vector");let l=0===r.length,u=0===s.length,p=0===o.length,m=[];l||m.push(r[0]),u||m.push(s[0]),p||m.push(o[0]);for(let t=1;t0&&os)a=0;else if((a=Math.ceil(Math.abs((o-s)/m)))>0x7fffffff)throw Error("Requires ((limit - start) / delta) <= 2147483647");_[r+1]=_[r]+a}let w=getArrayFromDType(a,_[y]),I=0;for(let r=0;ra&&(a=r)}return a}static getMaxWidthValueRowID(t){let r=t.length;if(0===r)return 0;let a=0,n=t[0],s=0;for(let i=1;i"Final length of result must be equal to firstDimension."),s}calculateOutputIndexRowSplit(t,r,a,n){let s=t.length,i=[];for(let o=0;o0&&i.length!==t[s-1])throw Error("Invalid row split size.");return i}calculateOutputIndexValueRowID(t,r,a,n){let s=t.length,i=[];if(0===s)return[];let o=0,l=t[0];if(l>=r.length)throw Error(`Got currentValueRowId=${l}, which is not less than ${r.length}`);let u=r[l];i.push(u);for(let p=1;p=0&&(++o=r.length)throw Error(`Got nextValueRowId=${s} which is not less than ${r.length}`);u=r[s]}i.push(u)}if(i.length!==t.length)throw Error("Invalid row ids.");return i}calculateOutputIndex(t,r,a,n){let s=this.getRowPartitionTensor(t),i=this.getRowPartitionTypeByDimension(t);switch(i){case uz.VALUE_ROWIDS:return this.calculateOutputIndexValueRowID(s,r,a,n);case uz.ROW_SPLITS:if(s.length-1>r.length)throw Error(`Row partition size is greater than output size: ${s.length-1} > ${r.length}`);return 
this.calculateOutputIndexRowSplit(s,r,a,n);default:throw Error(`Unsupported partition type: ${uz[i]}`)}}getFirstDimensionSize(){let t=this.rowPartitionValues[0];if(0===this.rowPartitionTypes.length)throw Error("No row_partition_types given.");let r=this.rowPartitionTypes[0];switch(r){case uz.FIRST_DIM_SIZE:return t[0];case uz.VALUE_ROWIDS:throw Error("Cannot handle VALUE_ROWIDS in first dimension.");case uz.ROW_SPLITS:return this.rowPartitionValuesShapes[0][0]-1;default:throw Error(`Cannot handle type ${uz[r]}`)}}compute(){if(this.rowPartitionValues[0].length<=0)throw Error("Invalid first partition input. Tensor requires at least one element.");let t=this.getFirstDimensionSize(),r=this.calculateOutputSize(t),a=Array(this.raggedRank+1);a[a.length-1]=1;for(let t=a.length-2;t>=0;--t)a[t]=a[t+1]*r[t+1];let n=makeShape(r,!1),s=getArrayFromDType(this.valuesDType,sizeFromShape(n));if(a[0]*r[0]>0){let i=this.calculateFirstParentOutputIndex(t,a[0],r[0]);for(let t=1;t<=this.raggedRank;++t)i=this.calculateOutputIndex(t-1,i,a[t],r[t]);this.setOutput(this.raggedRank,i,s,n)}return[n,s]}setOutput(t,r,a,n){if(0===a.length)return;let s=this.values,i=n.slice(),o=sizeFromShape(i=i.slice(t+1)),l=r.length,u=this.defaultValue;if(u.length!==o&&1!==u.length){let t=this.defaultValueShape;globals_tidy(()=>{u=nd(a6(u,t),i).dataSync()})}let p=0,m=0,y=0;for(let t=0;t<=l;++t){let n=t=l&&(n=Math.floor(a.length/o)),n>y)if(1===this.defaultValue.length)a.subarray(y*o,n*o).fill(this.defaultValue[0]),y=n;else for(;n>y;)copyArray(a.slice(y*o),u,o),++y;n<0?(p=t+1,m=y):(p=t,y=(m=y)+1)}}};function copyArray(t,r,a){for(let n=0;n= 0`);if(n<-1)throw Error(`Dimension ${n} must be >= -1`);n=-1}a.push(n)}return a}function raggedTensorToTensorImpl(t,r,a,n,s,i,o,l,u,p){return new RaggedTensorToTensorOp(t,r,a,n,s,i,o,l,u,p).compute()}function 
RaggedTensorToTensor_raggedTensorToTensor(t){let{inputs:r,backend:a,attrs:n}=t,{shape:s,values:i,defaultValue:o,rowPartitionTensors:l}=r,{rowPartitionTypes:u}=n,p=a.data.get(s.dataId).values,m=a.data.get(i.dataId).values,y=a.data.get(o.dataId).values,_=l.map(t=>a.data.get(t.dataId).values),w=l.map(t=>t.shape),[I,C]=raggedTensorToTensorImpl(p,s.shape,m,i.shape,i.dtype,y,o.shape,_,w,u);return a.makeTensorInfo(I,i.dtype,C)}function rangeImpl(t,r,a,n){let s=t===r,i=t1;if(s||i||o)return makeZerosTypedArray(0,n);let l=makeZerosTypedArray(Math.abs(Math.ceil((r-t)/a)),n);r1/t);function ResizeBilinear_resizeBilinear(t){let{inputs:r,backend:a,attrs:n}=t,{images:s}=r,{alignCorners:i,halfPixelCenters:o,size:l}=n;assertNotComplex(s,"resizeBilinear");let u=computeStrides(s.shape),[p,m]=l,[y,_,w,I]=s.shape,C=a.data.get(s.dataId).values,E=new Float32Array(sizeFromShape([y,p,m,I])),A=[i&&p>1?_-1:_,i&&m>1?w-1:w],$=[i&&p>1?p-1:p,i&&m>1?m-1:m],F=0,D=A[0]/$[0],P=A[1]/$[1];for(let t=0;t1?p-1:p,o&&w>1?m-1:m],E=[o&&_>1?_-1:_,o&&w>1?w-1:w],A=C[0]/E[0],$=C[1]/E[1],F=a.data.get(i.dataId).values,D=0;for(let t=0;t1?_-1:_,i&&m>1?w-1:w],$=[i&&p>1?p-1:p,i&&m>1?m-1:m],F=A[0]/$[0],D=A[1]/$[1],P=0;for(let t=0;t1?m-1:m,o&&I>1?y-1:y],$=[o&&w>1?w-1:w,o&&I>1?I-1:I],F=A[0]/$[0],D=A[1]/$[1],P=1/F,L=1/D,z=2*Math.ceil(P)+2,B=2*Math.ceil(L)+2;for(let t=0;t=w)continue;let C=r+_*u[1],A=_*F;if(t===Math.min(m-1,o?Math.round(A):Math.floor(A)))for(let t=0;t=I)continue;let n=C+r*u[2],i=r*D;s===Math.min(y-1,o?Math.round(i):Math.floor(i))&&(l+=E[n+a])}}C[i+a]=l}}}}return a.makeTensorInfo(s.shape,s.dtype,C)}function Reverse_reverse(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{dims:i}=n;assertNotComplex(s,"reverse");let o=s.shape.length,l=parseAxisParam(i,s.shape);if(0===o)return Identity_identity({inputs:{x:s},backend:a});let u=new TensorBuffer(s.shape,s.dtype),p=a.bufferSync(s);for(let t=0;ta[t]=s.shape[t]-1-a[t]),u.set(p.get(...a),...r)}return a.makeTensorInfo(u.shape,u.dtype,u.values)}let 
uB=unaryKernelFunc(rN,t=>{let r=Math.floor(t);return t-r<.5?Math.floor(t):t-r>.5?Math.ceil(t):r%2==0?r:r+1}),uW=createSimpleUnaryImpl(t=>1/Math.sqrt(t)),uU=unaryKernelFuncFromImpl(rC,uW);function scatterImpl(t,r,a,n,s,i,o,l,u,p){let m=t.values,y=r.values;if(0===n)return buffer(a,r.dtype);let _=u instanceof TensorBuffer?u:buffer([n/s,s],r.dtype);"string"==typeof u||"number"==typeof u?_.values.fill(u):"boolean"==typeof u&&_.values.fill(+u);for(let t=0;t=n/s)throw Error(`Invalid indices: ${i} does not index into ${a}`);for(let a=0;a1||1===s.shape.length?1:sizeFromShape(s.shape.slice(1));for(let t=0;tt>=0?ok*t:oT*(Math.exp(t)-1)),uj=unaryKernelFunc(rO,t=>t<0?-1:+(t>0)),uK=unaryKernelFunc("Sin",t=>Math.sin(t)),uH=unaryKernelFunc(rP,t=>Math.sinh(t)),uq=Math.log(11920928955078125e-23)+2,uX=unaryKernelFunc(rL,t=>{let r=Math.exp(t);return t-uq?t:Math.log(1+r)});function SpaceToBatchND_spaceToBatchND(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{blockShape:i,paddings:o}=n;assertNotComplex([s],"spaceToBatchND");let l=sizeFromShape(i),u=[[0,0]];u.push(...o);for(let t=1+i.length;t=u)throw Error(getSparseFillEmptyRowsOutOfRangeIndexErrorMessage(r,a,u));++I[a],_=_&&a>=w,w=a}let C=!0;for(let t=0;t0&&(I[t]+=I[t-1])}if(C&&_){for(let t=0;tNumber(t)))),a.makeTensorInfo([C.length],n.dtype,new Int32Array(C))]}function sparseReshapeImpl(t,r,a,n,s){let i=sizeFromShape(n),o=r[0],l=s.length,u=[],p=1,m=-1;for(let t=0;t0){_[y-1]=1;for(let t=y-2;t>=0;--t)_[t]=_[t+1]*n[t+1]}let w=[];if(l>0){w[l-1]=1;for(let t=l-2;t>=0;--t)w[t]=w[t+1]*u[t+1]}let I=getArrayFromDType(a,o*l);for(let r=0;r0?s[l-1]+1:0;if(m<0)throw Error(getSparseSegmentReductionNegativeSegmentIdsErrorMessage());let y=r.slice();y[0]=m;let _=getArrayFromDType(a,y.reduce((t,r)=>t*r,1));if(0===l)return m>0&&_.fill(o),[_,y];if(m<=0)throw Error(getSparseSegmentReductionNegativeSegmentIdsErrorMessage());let w=0,I=1,C=0,E=s[0];for(;;){let r=0;if(I=r)throw 
Error(getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage())}if(E<0||E>=m)throw Error(getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage(E,m));E>C&&_.fill(o,C*p,E*p);for(let r=w;r=u[0])throw Error(getSparseSegmentReductionIndicesOutOfRangeErrorMessage(r,n[r],u[0]));for(let r=0;rl)break}return C{let r=[...m];r[l]=t;let n=Slice_slice({inputs:{x:s},backend:a,attrs:{begin:p,size:r}});return p[l]+=t,n})}let uY=createSimpleUnaryImpl(t=>Math.sqrt(t)),uZ=unaryKernelFunc(rz,t=>Math.sqrt(t)),uJ=binaryKernelFunc(rq,createSimpleBinaryKernelImpl((t,r)=>{let a=t-r;return a*a})),uQ=createSimpleUnaryImpl((t,r)=>{let{pattern:a,replaceGlobal:n,rewrite:s}=r;return t.replace(new RegExp(a,n?"g":""),s)}),u0=unaryKernelFuncFromImpl(rY,uQ),u1=unaryKernelFunc(ae,(t,r)=>isNaN(t)?NaN:t>0?1:r.alpha);function stridedSliceImpl(t,r,a,n){let s=buffer(t,r.dtype);for(let t=0;t=1,()=>`Input must have rank at least 1, got: ${i.shape.length}`);let t=computeOutShape(F,D,P),a=Slice_slice({inputs:{x:i},backend:n,attrs:{begin:F,size:t}});r=Reshape_reshape({inputs:{x:a},backend:n,attrs:{shape:C}}),n.disposeIntermediateTensorInfo(a)}else{let t=stridedSliceImpl(I,n.bufferSync(i),P,F);r=n.makeTensorInfo(C,t.dtype,t.values)}return r}let StringNGramsOp=class StringNGramsOp{constructor(t,r,a,n,s,i){this.separator=encodeString(t),this.nGramWidths=r,this.leftPad=encodeString(a),this.rightPad=encodeString(n),this.padWidth=s,this.preserveShort=i}getPadWidth(t){return Math.min(this.padWidth<0?t-1:this.padWidth,t-1)}getNumNGrams(t,r){return Math.max(0,t+2*this.getPadWidth(r)-r+1)}createNGrams(t,r,a,n,s,i){for(let o=0;o0?0:o-u);l=0+p*this.leftPad.length;for(let r=0;rt.forEach(t=>w[I++]=t);for(let t=0;t0){appendToNGram(t[_+y-1]);for(let t=0;t0){let t=r[0];if(0!==t)throw Error(`First split value must be 0, got ${t}`);for(let s=1;s=t;if(!(n=n&&r[s]<=a))throw Error(`Invalid split value ${r[s]}, must be in [${t}, ${a}]`);t=r[s]}if(t!==a)throw Error(`Last split value must be data size. 
Expected ${a}, got ${t}`)}let s=n-1,i=getArrayFromDType("int32",n);if(0===a||0===n){let t=Array(a);for(let t=0;t<=s;++t)i[t]=0;return[t,i]}i[0]=0;for(let t=1;t<=s;++t){let a=r[t]-r[t-1],n=0;this.nGramWidths.forEach(t=>{n+=this.getNumNGrams(a,t)}),this.preserveShort&&a>0&&0===n&&(n=1),i[t]=i[t-1]+n}let o=Array(i[s]);for(let a=0;a{let l=r[a+1]-r[a],u=this.getNumNGrams(l,i);this.createNGrams(t,n,o,s,u,i),s+=u}),this.preserveShort&&s===i[a]){let i=r[a+1]-r[a];if(0===i)continue;let l=i+2*this.padWidth;this.createNGrams(t,n,o,s,1,l)}}return[o,i]}};function stringNGramsImpl(t,r,a,n,s,i,o,l){return new StringNGramsOp(a,n,s,i,o,l).compute(t,r)}function StringNGrams_stringNGrams(t){let{inputs:r,backend:a,attrs:n}=t,{separator:s,nGramWidths:i,leftPad:o,rightPad:l,padWidth:u,preserveShortSequences:p}=n,{data:m,dataSplits:y}=r,[_,w]=stringNGramsImpl(a.data.get(m.dataId).values,a.data.get(y.dataId).values,s,i,o,l,u,p);return[a.makeTensorInfo([_.length],"string",_),a.makeTensorInfo(y.shape,"int32",w)]}function StringSplit_impl_split(t,r,a,n){if(!t.length)return;if(0===r.length){for(let r=0;rMath.tan(t)),u3=unaryKernelFunc(r1,t=>Math.tanh(t));function tileImpl(t,r){let a=Array(t.rank);for(let n=0;n{let a=r.value-t.value;return 0===a?t.index-r.index:a};function TopK_impl_select(t,r,a=0,n=t.length-1){for(;n>a;){if(n-a>600){let s=n-a+1,i=r-a+1,o=Math.log(s),l=.5*Math.exp(2*o/3),u=.5*Math.sqrt(o*l*(s-l)/s)*Math.sign(i-s/2),p=Math.max(a,Math.floor(r-i*l/s+u)),m=Math.min(n,Math.floor(r+(s-i)*l/s+u));TopK_impl_select(t,r,p,m)}let s=t[r],i=a,o=n;for(swap(t,a,r),comparePair(t[n],s)>0&&swap(t,a,n);icomparePair(t[i],s);)i+=1;for(;comparePair(t[o],s)>0;)o-=1}0===comparePair(t[a],s)?swap(t,a,o):swap(t,o+=1,n),o<=r&&(a=o+1),r<=o&&(n=o-1)}}function topKImpl(t,r,a,n,s){let i=r[r.length-1],[o,l]=[t.length/i,i],u=getArrayFromDType(a,o*n),p=getArrayFromDType("int32",o*n);for(let r=0;ro[r]={value:t,index:r}),nr-1)if(r<=1)a=0;else{let t=2*r;(a-=t*Math.trunc(a/t))>=r&&(a=t-a-1)}return 
clamp(0,a,r-1)}function mapCoordWrap(t,r){let a=t;return a<0?r<=1?a=0:a+=r*(Math.trunc(-a/(r-1))+1):a>r-1&&(r<=1?a=0:a-=r*Math.trunc(a/(r-1))),clamp(0,a,r-1)}function mapCoordConstant(t,r){return t}function mapCoordNearest(t,r){return clamp(0,t,r-1)}function readWithFillValue(t,r,a,n,s,i,o,l,u,p,m){return 0<=l&&l{for(let a=0;aa.disposeIntermediateTensorInfo(t)),w}for(let t of[{kernelName:an,backendName:"cpu",kernelFunc:_fusedMatMul},{kernelName:"Abs",backendName:"cpu",kernelFunc:t=>{let{x:r}=t.inputs,a=t.backend;assertNotComplex(r,"abs");let n=new Float32Array(sizeFromShape(r.shape));return n=simpleAbsImpl(a.data.get(r.dataId).values),a.makeOutput(n,r.shape,r.dtype)}},{kernelName:eB,backendName:"cpu",kernelFunc:lW},{kernelName:eW,backendName:"cpu",kernelFunc:lU},{kernelName:"Add",backendName:"cpu",kernelFunc:lB},{kernelName:eU,backendName:"cpu",kernelFunc:AddN_addN},{kernelName:"All",backendName:"cpu",kernelFunc:All_all},{kernelName:"Any",backendName:"cpu",kernelFunc:Any_any},{kernelName:eG,backendName:"cpu",kernelFunc:ArgMax_argMax},{kernelName:ej,backendName:"cpu",kernelFunc:ArgMin_argMin},{kernelName:eK,backendName:"cpu",kernelFunc:lG},{kernelName:eH,backendName:"cpu",kernelFunc:lj},{kernelName:eq,backendName:"cpu",kernelFunc:lK},{kernelName:eY,backendName:"cpu",kernelFunc:lH},{kernelName:eX,backendName:"cpu",kernelFunc:lq},{kernelName:eZ,backendName:"cpu",kernelFunc:AvgPool_avgPool},{kernelName:eQ,backendName:"cpu",kernelFunc:avgPool3D},{kernelName:e0,backendName:"cpu",kernelFunc:avgPool3DGrad},{kernelName:eJ,backendName:"cpu",kernelFunc:AvgPoolGrad_avgPoolGrad},{kernelName:e1,backendName:"cpu",kernelFunc:batchMatMul},{kernelName:tD,backendName:"cpu",kernelFunc:BatchNorm_batchNorm},{kernelName:e2,backendName:"cpu",kernelFunc:BatchToSpaceND_batchToSpaceND},{kernelName:e3,backendName:"cpu",kernelFunc:Bincount_bincount},{kernelName:e4,backendName:"cpu",kernelFunc:lY},{kernelName:e6,backendName:"cpu",kernelFunc:BroadcastArgs_broadcastArgs},{kernelName:e5,backendName
:"cpu",kernelFunc:Cast_cast},{kernelName:e8,backendName:"cpu",kernelFunc:lJ},{kernelName:e7,backendName:"cpu",kernelFunc:lQ},{kernelName:e9,backendName:"cpu",kernelFunc:Complex_complex},{kernelName:te,backendName:"cpu",kernelFunc:t=>{let{x:r}=t.inputs,a=t.backend,n=new Float32Array(sizeFromShape(r.shape)),s=a.data.get(r.dataId),i=s.complexTensorInfos.real,o=s.complexTensorInfos.imag,l=a.data.get(i.dataId).values,u=a.data.get(o.dataId).values;for(let t=0;t{let{x:n,filter:s}=t,{strides:i,pad:o,dilations:l}=a,u=r.data.get(n.dataId).values,p=n.shape.length,m=r.data.get(s.dataId).values,y=s.shape.length,{batchSize:_,inHeight:w,inWidth:I,inChannels:C,outHeight:E,outWidth:A,padInfo:$,strideHeight:F,strideWidth:D,filterHeight:P,filterWidth:L,dilationHeight:z,dilationWidth:B,outShape:G}=computeDilation2DInfo(n.shape,s.shape,i,o,"NHWC",l),j=sizeFromShape(G),K=G.length,H=getArrayFromDType(n.dtype,j);for(let t=0;t<_;++t)for(let r=0;r=0&&i=0&&w_&&(_=C)}}}H[locToIndex([t,r,i,l],K,computeStrides(G))]=_}}}return{dataId:r.write(toTypedArray(H,n.dtype),G,n.dtype),shape:G,dtype:n.dtype}}},{kernelName:tT,backendName:"cpu",kernelFunc:({inputs:t,backend:r,attrs:a})=>{let{x:n,filter:s,dy:i}=t,{strides:o,pad:l,dilations:u}=a,p=toNestedArray(n.shape,r.data.get(n.dataId).values),m=toNestedArray(s.shape,r.data.get(s.dataId).values),{batchSize:y,inHeight:_,inWidth:w,inChannels:I,outHeight:C,outWidth:E,padInfo:A,strideHeight:$,strideWidth:F,filterHeight:D,filterWidth:P,dilationHeight:L,dilationWidth:z,outShape:B}=computeDilation2DInfo(n.shape,s.shape,o,l,"NHWC",u);assert(i.rank===B.length,()=>`Error in ${tT}, dy must have the same rank as output ${B.length}, but got ${i.rank}`);let G=toNestedArray(B,r.data.get(i.dataId).values),j=makeZerosNestedTypedArray(s.shape,s.dtype);for(let t=0;t=0&&n<_)for(let 
a=0;a=0&&yo&&(o=s,l=r,u=a)}}}j[l][u][i]+=G[t][r][n][i]}}}return{dataId:r.write(toTypedArray(j,n.dtype),s.shape,s.dtype),shape:s.shape,dtype:s.dtype}}},{kernelName:t_,backendName:"cpu",kernelFunc:({inputs:t,backend:r,attrs:a})=>{let{x:n,filter:s,dy:i}=t,{strides:o,pad:l,dilations:u}=a,p=toNestedArray(n.shape,r.data.get(n.dataId).values),m=toNestedArray(s.shape,r.data.get(s.dataId).values),{batchSize:y,inHeight:_,inWidth:w,inChannels:I,outHeight:C,outWidth:E,padInfo:A,strideHeight:$,strideWidth:F,filterHeight:D,filterWidth:P,dilationHeight:L,dilationWidth:z,outShape:B}=computeDilation2DInfo(n.shape,s.shape,o,l,"NHWC",u);assert(i.rank===B.length,()=>`Error in ${t_}, dy must have the same rank as output ${B.length}, but got ${i.rank}`);let G=toNestedArray(B,r.data.get(i.dataId).values),j=makeZerosNestedTypedArray(n.shape,n.dtype);for(let t=0;t=0&&n<_)for(let a=0;a=0&&yo&&(o=s,l=n,u=y)}}}j[t][l][u][i]+=G[t][r][n][i]}}}return{dataId:r.write(toTypedArray(j,n.dtype),n.shape,n.dtype),shape:n.shape,dtype:n.dtype}}},{kernelName:tk,backendName:"cpu",kernelFunc:Draw_draw},{kernelName:tw,backendName:"cpu",kernelFunc:Einsum_einsum},{kernelName:"Elu",backendName:"cpu",kernelFunc:lD},{kernelName:tI,backendName:"cpu",kernelFunc:eluGrad},{kernelName:tN,backendName:"cpu",kernelFunc:l6},{kernelName:"Erf",backendName:"cpu",kernelFunc:l5},{kernelName:"Exp",backendName:"cpu",kernelFunc:l7},{kernelName:tC,backendName:"cpu",kernelFunc:ExpandDims_expandDims},{kernelName:tE,backendName:"cpu",kernelFunc:ue},{kernelName:"FFT",backendName:"cpu",kernelFunc:FFT_fft},{kernelName:tA,backendName:"cpu",kernelFunc:Fill_fill},{kernelName:t$,backendName:"cpu",kernelFunc:({inputs:t,attrs:r,backend:a})=>{let{image:n}=t,s=getArrayFromDType(n.dtype,sizeFromShape(n.shape)),[i,o,l,u]=n.shape,p=a.data.get(n.dataId).values;for(let t=0;t=0&&o{let{x:n}=t,{filterSize:s,strides:i,pad:o,includeBatchInIndex:l}=r;assertNotComplex(n,"MaxPoolWithArgmax");let 
u=a.data.get(n.dataId).values,p=computePool2DInfo(n.shape,s,i,[1,1],o),[m,y]=maxPoolWithArgmaxImpl(u,n.shape,n.dtype,l,p),_=a.write(m,p.outShape,n.dtype),w=a.write(y,p.outShape,n.dtype);return[{dataId:_,shape:p.outShape,dtype:n.dtype},{dataId:w,shape:p.outShape,dtype:"int32"}]}},{kernelName:t5,backendName:"cpu",kernelFunc:Mean_mean},{kernelName:"Min",backendName:"cpu",kernelFunc:Min_min},{kernelName:t8,backendName:"cpu",kernelFunc:uA},{kernelName:t7,backendName:"cpu",kernelFunc:MirrorPad_mirrorPad},{kernelName:"Mod",backendName:"cpu",kernelFunc:u$},{kernelName:t9,backendName:"cpu",kernelFunc:Multinomial_multinomial},{kernelName:re,backendName:"cpu",kernelFunc:l3},{kernelName:"Neg",backendName:"cpu",kernelFunc:Neg_neg},{kernelName:rr,backendName:"cpu",kernelFunc:nonMaxSuppressionV3},{kernelName:rn,backendName:"cpu",kernelFunc:nonMaxSuppressionV4},{kernelName:rs,backendName:"cpu",kernelFunc:nonMaxSuppressionV5},{kernelName:rt,backendName:"cpu",kernelFunc:uO},{kernelName:ro,backendName:"cpu",kernelFunc:OneHot_oneHot},{kernelName:ri,backendName:"cpu",kernelFunc:OnesLike_onesLike},{kernelName:rl,backendName:"cpu",kernelFunc:pack},uM,{kernelName:"Pow",backendName:"cpu",kernelFunc:uL},{kernelName:rp,backendName:"cpu",kernelFunc:Prelu_prelu},{kernelName:rh,backendName:"cpu",kernelFunc:Prod_prod},{kernelName:rc,backendName:"cpu",kernelFunc:RaggedGather_raggedGather},{kernelName:rd,backendName:"cpu",kernelFunc:RaggedRange_raggedRange},{kernelName:rm,backendName:"cpu",kernelFunc:RaggedTensorToTensor_raggedTensorToTensor},{kernelName:rf,backendName:"cpu",kernelFunc:Range_range},{kernelName:rg,backendName:"cpu",kernelFunc:Real_real},ur,{kernelName:ry,backendName:"cpu",kernelFunc:uV},{kernelName:rx,backendName:"cpu",kernelFunc:lO},{kernelName:rw,backendName:"cpu",kernelFunc:lM},{kernelName:rv,backendName:"cpu",kernelFunc:Reshape_reshape},{kernelName:rk,backendName:"cpu",kernelFunc:ResizeBilinear_resizeBilinear},{kernelName:rS,backendName:"cpu",kernelFunc:resizeBilinearGrad},{kern
elName:r_,backendName:"cpu",kernelFunc:ResizeNearestNeighbor_resizeNearestNeighbor},{kernelName:rT,backendName:"cpu",kernelFunc:resizeNearestNeighborGrad},{kernelName:rI,backendName:"cpu",kernelFunc:Reverse_reverse},{kernelName:ar,backendName:"cpu",kernelFunc:({inputs:t,attrs:r,backend:a})=>{let{image:n}=t,{radians:s,fillValue:i,center:o}=r,l=getArrayFromDType(n.dtype,sizeFromShape(n.shape)),[u,p,m,y]=n.shape,[_,w]=getImageCenter(o,p,m),I=Math.sin(s),C=Math.cos(s),E=a.data.get(n.dataId).values;for(let t=0;t=0&&D=0&&P{let{x:a}=t;assertNotComplex(a,"square");let n=r.data.get(a.dataId).values,s=new Float32Array(n.length);for(let t=0;ttypeof OffscreenCanvas&&2===t)return new OffscreenCanvas(300,150);if("u">typeof document)return document.createElement("canvas");throw Error("Cannot create a canvas in this context")}function getWebGLRenderingContext(t,r){if(1!==t&&2!==t)throw Error("Cannot get WebGL rendering context, WebGL is disabled.");let a=null==r?createCanvas(t):r;return(a.addEventListener("webglcontextlost",r=>{r.preventDefault(),delete u4[t]},!1),eV.getBool("SOFTWARE_WEBGL_ENABLED")&&(u6.failIfMajorPerformanceCaveat=!1),1===t)?a.getContext("webgl",u6)||a.getContext("experimental-webgl",u6):a.getContext("webgl2",u6)}function getUnpackedArraySizeFromMatrixSize(t,r){return t*r}function getDenseTexShape(t){return sizeToSquarishShape(Math.ceil(sizeFromShape(t)/4))}function getPackedMatrixTextureShapeWidthHeight(t,r){return[Math.max(1,Math.ceil(r/2)),Math.max(1,Math.ceil(t/2))]}function getPackedRGBAArraySizeFromMatrixShape(t,r){let[a,n]=getPackedMatrixTextureShapeWidthHeight(t,r);return a*n*4}function getTextureConfig(t,r){let a,n,s,i,o,l,u,p,m,y;return 
2===eV.getNumber("WEBGL_VERSION")?(a=t.R32F,n=t.R16F,s=t.RGBA16F,i=t.RGBA32F,o=t.RED,u=4,p=1,m=t.HALF_FLOAT,y=t.FLOAT,l=t.RGBA8):(a=t.RGBA,n=t.RGBA,s=t.RGBA,i=t.RGBA,o=t.RGBA,u=4,p=4,m=null!=r?r.HALF_FLOAT_OES:null,y=t.FLOAT,l=t.RGBA),{internalFormatFloat:a,internalFormatHalfFloat:n,internalFormatPackedHalfFloat:s,internalFormatPackedFloat:i,textureFormatFloat:o,downloadTextureFormat:l,downloadUnpackNumChannels:u,defaultNumChannels:p,textureTypeHalfFloat:m,textureTypeFloat:y}}function callAndCheck(t,r){let a=r();return eV.getBool("DEBUG")&&checkWebGLError(t),a}function checkWebGLError(t){let r=t.getError();if(r!==t.NO_ERROR)throw Error("WebGL Error: "+getWebGLErrorMessage(t,r))}function canBeRepresented(t){return!!(eV.getBool("WEBGL_RENDER_FLOAT32_ENABLED")||0===t||596e-10Math.abs(t))}function getWebGLErrorMessage(t,r){switch(r){case t.NO_ERROR:return"NO_ERROR";case t.INVALID_ENUM:return"INVALID_ENUM";case t.INVALID_VALUE:return"INVALID_VALUE";case t.INVALID_OPERATION:return"INVALID_OPERATION";case t.INVALID_FRAMEBUFFER_OPERATION:return"INVALID_FRAMEBUFFER_OPERATION";case t.OUT_OF_MEMORY:return"OUT_OF_MEMORY";case t.CONTEXT_LOST_WEBGL:return"CONTEXT_LOST_WEBGL";default:return`Unknown error code ${r}`}}function getExtensionOrThrow(t,r){return throwIfNull(t,()=>t.getExtension(r),'Extension "'+r+'" not supported on this browser.')}function createVertexShader(t,r){let a=throwIfNull(t,()=>t.createShader(t.VERTEX_SHADER),"Unable to create vertex WebGLShader.");if(callAndCheck(t,()=>t.shaderSource(a,r)),callAndCheck(t,()=>t.compileShader(a)),!1===t.getShaderParameter(a,t.COMPILE_STATUS))throw console.log(t.getShaderInfoLog(a)),Error("Failed to compile vertex shader.");return a}function createFragmentShader(t,r){let a=throwIfNull(t,()=>t.createShader(t.FRAGMENT_SHADER),"Unable to create fragment WebGLShader.");if(callAndCheck(t,()=>t.shaderSource(a,r)),callAndCheck(t,()=>t.compileShader(a)),eV.get("ENGINE_COMPILE_ONLY"))return 
a;if(!1===t.getShaderParameter(a,t.COMPILE_STATUS))throw logShaderSourceAndInfoLog(r,t.getShaderInfoLog(a)),Error("Failed to compile fragment shader.");return a}(z=eo||(eo={}))[z.DENSE=0]="DENSE",z[z.SHARED_BATCH=1]="SHARED_BATCH",(B=el||(el={}))[B.RENDER=0]="RENDER",B[B.UPLOAD=1]="UPLOAD",B[B.PIXELS=2]="PIXELS",B[B.DOWNLOAD=3]="DOWNLOAD",(G=eu||(eu={}))[G.UNPACKED_FLOAT16=0]="UNPACKED_FLOAT16",G[G.UNPACKED_FLOAT32=1]="UNPACKED_FLOAT32",G[G.PACKED_4X1_UNSIGNED_BYTE=2]="PACKED_4X1_UNSIGNED_BYTE",G[G.PACKED_2X2_FLOAT32=3]="PACKED_2X2_FLOAT32",G[G.PACKED_2X2_FLOAT16=4]="PACKED_2X2_FLOAT16";let u5=/ERROR: [0-9]+:([0-9]+):/g;function logShaderSourceAndInfoLog(t,r){let a=u5.exec(r);if(null==a){console.log(`Couldn't parse line number in error: ${r}`),console.log(t);return}let n=+a[1],s=t.split(` `),i=s.length.toString().length+2,o=s.map((t,r)=>rightPad((r+1).toString(),i)+t),l=0;for(let t=0;tt.createProgram(),"Unable to create WebGLProgram.")}function linkProgram(t,r){if(callAndCheck(t,()=>t.linkProgram(r)),!eV.get("ENGINE_COMPILE_ONLY")&&!1===t.getProgramParameter(r,t.LINK_STATUS))throw console.log(t.getProgramInfoLog(r)),Error("Failed to link vertex and fragment shaders.")}function validateProgram(t,r){if(callAndCheck(t,()=>t.validateProgram(r)),!1===t.getProgramParameter(r,t.VALIDATE_STATUS))throw console.log(t.getProgramInfoLog(r)),Error("Shader program validation failed.")}function createStaticVertexBuffer(t,r){let a=throwIfNull(t,()=>t.createBuffer(),"Unable to create WebGLBuffer");return callAndCheck(t,()=>t.bindBuffer(t.ARRAY_BUFFER,a)),callAndCheck(t,()=>t.bufferData(t.ARRAY_BUFFER,r,t.STATIC_DRAW)),a}function createStaticIndexBuffer(t,r){let a=throwIfNull(t,()=>t.createBuffer(),"Unable to create WebGLBuffer");return callAndCheck(t,()=>t.bindBuffer(t.ELEMENT_ARRAY_BUFFER,a)),callAndCheck(t,()=>t.bufferData(t.ELEMENT_ARRAY_BUFFER,r,t.STATIC_DRAW)),a}function createTexture(t){return throwIfNull(t,()=>t.createTexture(),"Unable to create WebGLTexture.")}function 
/* NOTE(review): minified bundle output; this line continues the `function` keyword emitted at the end of the previous line. Comments below are the only change; code bytes are untouched. */validateTextureSize(t,r){/* Throws unless 0 < width/height <= WEBGL_MAX_TEXTURE_SIZE. */let a=eV.getNumber("WEBGL_MAX_TEXTURE_SIZE");if(t<=0||r<=0)throw Error(`Requested texture size [${t}x${r}] is invalid.`);if(t>a||r>a)throw Error(`Requested texture size [${t}x${r}] greater than WebGL maximum on this browser / GPU [${a}x${a}].`)}/* Creates a WebGLFramebuffer, throwing if the context returns null. */function createFramebuffer(t){return throwIfNull(t,()=>t.createFramebuffer(),"Unable to create WebGLFramebuffer.")}/* Binds buffer `n` as ARRAY_BUFFER for vertex attribute `a` of program `r`; returns false when the attribute is absent (location -1), otherwise sets the pointer (size `s`, stride `i`, offset `o`), enables the array and returns true. */function bindVertexBufferToProgramAttribute(t,r,a,n,s,i,o){let l=t.getAttribLocation(r,a);return -1!==l&&(callAndCheck(t,()=>t.bindBuffer(t.ARRAY_BUFFER,n)),callAndCheck(t,()=>t.vertexAttribPointer(l,s,t.FLOAT,!1,i,o)),callAndCheck(t,()=>t.enableVertexAttribArray(l)),!0)}/* Activates texture unit `a` (TEXTURE0+a) and binds texture `r` to TEXTURE_2D there; unit index is range-checked first. */function bindTextureUnit(t,r,a){validateTextureUnit(t,a),callAndCheck(t,()=>t.activeTexture(t.TEXTURE0+a)),callAndCheck(t,()=>t.bindTexture(t.TEXTURE_2D,r))}/* Uniform-location lookup that throws when the uniform is not present in the program. */function getProgramUniformLocationOrThrow(t,r,a){return throwIfNull(t,()=>t.getUniformLocation(r,a),'uniform "'+a+'" not present in program.')}/* Nullable variant of the lookup above. */function getProgramUniformLocation(t,r,a){return t.getUniformLocation(r,a)}/* Binds texture `r` to unit `n` and points sampler uniform location `a` at that unit. */function bindTextureToProgramUniformSampler(t,r,a,n){callAndCheck(t,()=>bindTextureUnit(t,r,n)),callAndCheck(t,()=>t.uniform1i(a,n))}/* Attaches texture `r` as COLOR_ATTACHMENT0 of framebuffer `a`. */function bindColorTextureToFramebuffer(t,r,a){callAndCheck(t,()=>t.bindFramebuffer(t.FRAMEBUFFER,a)),callAndCheck(t,()=>t.framebufferTexture2D(t.FRAMEBUFFER,t.COLOR_ATTACHMENT0,t.TEXTURE_2D,r,0))}/* Detaches COLOR_ATTACHMENT0 (attaches null) from framebuffer `r`. */function unbindColorTextureFromFramebuffer(t,r){callAndCheck(t,()=>t.bindFramebuffer(t.FRAMEBUFFER,r)),callAndCheck(t,()=>t.framebufferTexture2D(t.FRAMEBUFFER,t.COLOR_ATTACHMENT0,t.TEXTURE_2D,null,0))}/* Throws unless the currently bound framebuffer reports FRAMEBUFFER_COMPLETE. */function validateFramebuffer(t){let r=t.checkFramebufferStatus(t.FRAMEBUFFER);if(r!==t.FRAMEBUFFER_COMPLETE)throw Error("Error binding framebuffer: "+getFramebufferErrorMessage(t,r))}/* Maps a framebuffer status enum to its name; the switch continues on the next source line. */function getFramebufferErrorMessage(t,r){switch(r){case t.FRAMEBUFFER_INCOMPLETE_ATTACHMENT:return"FRAMEBUFFER_INCOMPLETE_ATTACHMENT";case t.FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:return"FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT";case 
t.FRAMEBUFFER_INCOMPLETE_DIMENSIONS:return"FRAMEBUFFER_INCOMPLETE_DIMENSIONS";case t.FRAMEBUFFER_UNSUPPORTED:return"FRAMEBUFFER_UNSUPPORTED";default:return`unknown error ${r}`}}function throwIfNull(t,r,a){let n=callAndCheck(t,()=>r());if(null==n)throw Error(a);return n}function validateTextureUnit(t,r){let a=t.MAX_COMBINED_TEXTURE_IMAGE_UNITS-1,n=r+t.TEXTURE0;if(na){let t=`[gl.TEXTURE0, gl.TEXTURE${a}]`;throw Error(`textureUnit must be in ${t}.`)}}function getBatchDim(t,r=2){return sizeFromShape(t.slice(0,t.length-r))}function getRowsCols(t){if(0===t.length)throw Error("Cannot get rows and columns of an empty shape array.");return[t.length>1?t[t.length-2]:1,t[t.length-1]]}function getShapeAs3D(t){let r=[1,1,1];return 0!==t.length&&(1!==t.length||1!==t[0])&&(r=[getBatchDim(t),...getRowsCols(t)]),r}function getTextureShapeFromLogicalShape(t,r=!1){let a=eV.getNumber("WEBGL_MAX_TEXTURE_SIZE"),n=eV.getNumber("WEBGL_MAX_SIZE_FOR_NARROW_TEXTURE");n===1/0&&eV.getBool("WEBGL_AUTO_SQUARIFY_NARROW_TEXTURE_SHAPE")&&(n=a/2),r&&(a*=2,n*=2,1===(t=t.map((r,a)=>a>=t.length-2?nearestLargerEven(t[a]):t[a])).length&&(t=[2,t[0]])),2!==t.length&&(t=util_base_squeezeShape(t).newShape);let s=sizeFromShape(t),i=null;t.length<=1&&s<=a?i=[1,s]:2===t.length&&t[0]<=a&&t[1]<=a?i=t:3===t.length&&t[0]*t[1]<=a&&t[2]<=a?i=[t[0]*t[1],t[2]]:3===t.length&&t[0]<=a&&t[1]*t[2]<=a?i=[t[0],t[1]*t[2]]:4===t.length&&t[0]*t[1]*t[2]<=a&&t[3]<=a?i=[t[0]*t[1]*t[2],t[3]]:4===t.length&&t[0]<=a&&t[1]*t[2]*t[3]<=a&&(i=[t[0],t[1]*t[2]*t[3]]);let o=null!=i&&Math.max(...i)>n&&Math.min(...i)<=(r?2:1)&&Math.min(...i)>0;if(null==i||o)if(r){let r=getBatchDim(t),a=2,n=2;t.length&&([a,n]=getRowsCols(t)),i=sizeToSquarishShape(s=a/2*r*(n/2)).map(t=>2*t)}else i=sizeToSquarishShape(s);return i}function isReshapeFree(t,r){if(arraysEqual(t=t.slice(-2),r=r.slice(-2))||!t.length||!r.length||0===t[0]||0===t[1]||0===r[0]||0===r[1])return!0;if(t.length!==r.length){let 
/* NOTE(review): minified bundle output; the leading tokens complete a `let` declaration inside `isReshapeFree` begun on the previous line. Comments are the only change; code bytes are untouched. */a=t[t.length-1],n=r[r.length-1];if(a===n||a%2==0&&n%2==0&&(1===t[0]||1===r[0]))return!0}return t[1]===r[1]&&t[0]%2==0&&r[0]%2==0}/* Lazily queries and caches (in module-level `u`) the context's MAX_TEXTURE_SIZE for WebGL version `t`. */function getWebGLMaxTextureSize(t){if(null==u){let r=getWebGLContext(t);u=r.getParameter(r.MAX_TEXTURE_SIZE)}return u}/* Lazily caches MAX_TEXTURE_IMAGE_UNITS (module-level `p`), capped at 16. */function getMaxTexturesInShader(t){if(null==p){let r=getWebGLContext(t);p=r.getParameter(r.MAX_TEXTURE_IMAGE_UNITS)}return Math.min(16,p)}/* Timer-query support level: 0 when WebGL is off; 2 when EXT_disjoint_timer_query_webgl2 exists on WebGL2; else 1/0 via the WebGL1 extension (the `+!!` coerces the boolean to a number). */function getWebGLDisjointQueryTimerVersion(t){if(0===t)return 0;let r=getWebGLContext(t);return hasExtension(r,"EXT_disjoint_timer_query_webgl2")&&2===t?2:+!!hasExtension(r,"EXT_disjoint_timer_query")}/* True when the named extension is available on context `t`. */function hasExtension(t,r){return null!=t.getExtension(r)}/* True when a context for WebGL version `t` can be created; creation errors are logged and reported as false rather than thrown. */function isWebGLVersionEnabled(t){try{let r=getWebGLContext(t);if(null!=r)return!0}catch(t){console.log("Error when getting WebGL context: ",t)}return!1}/* Checks the per-version float-render extension, then probes for real by attaching a float texture to a framebuffer. */function isCapableOfRenderingToFloatTexture(t){if(0===t)return!1;let r=getWebGLContext(t);if(1===t){if(!hasExtension(r,"OES_texture_float"))return!1}else if(!hasExtension(r,"EXT_color_buffer_float"))return!1;return createFloatTextureAndBindToFramebuffer(r)}/* Same idea for reading floats back from a texture; on WebGL2 without EXT_color_buffer_float it falls back to a half-float probe. */function isDownloadFloatTextureEnabled(t){if(0===t)return!1;let r=getWebGLContext(t);if(1===t){if(!hasExtension(r,"OES_texture_float")||!hasExtension(r,"WEBGL_color_buffer_float"))return!1}else{if(hasExtension(r,"EXT_color_buffer_float"))return createFloatTextureAndBindToFramebuffer(r);let t="EXT_color_buffer_half_float";if(hasExtension(r,t)){let a=r.getExtension(t);return createHalfFloatTextureAndBindToFramebuffer(r,a)}return!1}return createFloatTextureAndBindToFramebuffer(r)}/* Probe: builds a 1x1 float texture, attaches it as COLOR_ATTACHMENT0 and reports whether the framebuffer is complete; unbinding/deletion continues on the next source line. */function createFloatTextureAndBindToFramebuffer(t){let r=getTextureConfig(t),a=t.createTexture();t.bindTexture(t.TEXTURE_2D,a),t.texImage2D(t.TEXTURE_2D,0,r.internalFormatFloat,1,1,0,r.textureFormatFloat,r.textureTypeFloat,null);let n=t.createFramebuffer();t.bindFramebuffer(t.FRAMEBUFFER,n),t.framebufferTexture2D(t.FRAMEBUFFER,t.COLOR_ATTACHMENT0,t.TEXTURE_2D,a,0);let s=t.checkFramebufferStatus(t.FRAMEBUFFER)===t.FRAMEBUFFER_COMPLETE;return 
t.bindTexture(t.TEXTURE_2D,null),t.bindFramebuffer(t.FRAMEBUFFER,null),t.deleteTexture(a),t.deleteFramebuffer(n),s}function createHalfFloatTextureAndBindToFramebuffer(t,r){let a=getTextureConfig(t,r),n=t.createTexture();t.bindTexture(t.TEXTURE_2D,n),t.texImage2D(t.TEXTURE_2D,0,a.internalFormatHalfFloat,1,1,0,a.textureFormatFloat,a.textureTypeHalfFloat,null);let s=t.createFramebuffer();t.bindFramebuffer(t.FRAMEBUFFER,s),t.framebufferTexture2D(t.FRAMEBUFFER,t.COLOR_ATTACHMENT0,t.TEXTURE_2D,n,0);let i=t.checkFramebufferStatus(t.FRAMEBUFFER)===t.FRAMEBUFFER_COMPLETE;return t.bindTexture(t.TEXTURE_2D,null),t.bindFramebuffer(t.FRAMEBUFFER,null),t.deleteTexture(n),t.deleteFramebuffer(s),i}function isWebGLFenceEnabled(t){return 2===t&&null!=getWebGLContext(t).fenceSync}function webgl_util_assertNotComplex(t,r){Array.isArray(t)||(t=[t]),t.forEach(t=>{null!=t&&assert("complex64"!==t.dtype,()=>`${r} does not support complex64 tensors in the WebGL backend.`)})}let u8=eV;function getGlslDifferences(){let t,r,a,n,s,i,o,l,u,p;return 2===eV.getNumber("WEBGL_VERSION")?(t="#version 300 es",r="in",a="out",n="in",s="texture",i="outputColor",o="out vec4 outputColor;",l=eV.getBool("WEBGL2_ISNAN_CUSTOM")?` bool isnan_custom(float val) { uint floatToUint = floatBitsToUint(val); return (floatToUint & 0x7fffffffu) > 0x7f800000u; } bvec4 isnan_custom(vec4 val) { return bvec4(isnan_custom(val.x), isnan_custom(val.y), isnan_custom(val.z), isnan_custom(val.w)); } #define isnan(value) isnan_custom(value) `:"",u="",p=` #define round(value) newRound(value) int newRound(float value) { return int(floor(value + 0.5)); } ivec4 newRound(vec4 value) { return ivec4(floor(value + vec4(0.5))); } `):(t="",r="attribute",a="varying",n="varying",s="texture2D",i="gl_FragColor",o="",l=` #define isnan(value) isnan_custom(value) bool isnan_custom(float val) { return (val > 0. || val < 1. || val == 0.) ? 
false : true; } bvec4 isnan_custom(vec4 val) { return bvec4(isnan(val.x), isnan(val.y), isnan(val.z), isnan(val.w)); } `,u=` uniform float INFINITY; bool isinf(float val) { return abs(val) == INFINITY; } bvec4 isinf(vec4 val) { return equal(abs(val), vec4(INFINITY)); } `,p=` int round(float value) { return int(floor(value + 0.5)); } ivec4 round(vec4 value) { return ivec4(floor(value + vec4(0.5))); } `),{version:t,attribute:r,varyingVs:a,varyingFs:n,texture2D:s,output:i,defineOutput:o,defineSpecialNaN:l,defineSpecialInf:u,defineRound:p}}function getLogicalCoordinatesFromFlatIndex(t,r,a="index"){let n=computeStrides(r);return n.map((r,s)=>{let i=`int ${t[s]} = ${a} / ${r}`,o=s===n.length-1?`int ${t[s+1]} = ${a} - ${t[s]} * ${r}`:`index -= ${t[s]} * ${r}`;return`${i}; ${o};`}).join("")}function getOutputLogicalCoordinatesFromFlatIndexByUniform(t,r,a="index"){let n=computeStrides(r);return n.map((r,s)=>{let i=`int ${t[s]} = ${a} / outShapeStrides[${s}]`,o=s===n.length-1?`int ${t[s+1]} = ${a} - ${t[s]} * outShapeStrides[${s}]`:`index -= ${t[s]} * outShapeStrides[${s}]`;return`${i}; ${o};`}).join("")}function symbolicallyComputeStrides(t,r){let a=t.length,n=t.map(t=>`${r}[${t}]`),s=Array(a-1);s[a-2]=n[a-1];for(let t=a-3;t>=0;--t)s[t]=`(${s[t+1]} * ${n[t+1]})`;return s}function getLogicalCoordinatesFromFlatIndexByUniform(t,r,a="index"){let n=symbolicallyComputeStrides(t.map((t,r)=>r),r);return n.map((r,s)=>{let i=`int ${t[s]} = ${a} / ${n[s]}`,o=s===n.length-1?`int ${t[s+1]} = ${a} - ${t[s]} * ${n[s]}`:`index -= ${t[s]} * ${n[s]}`;return`${i}; ${o};`}).join("")}function getFlatIndexFrom3D(t){let r=computeStrides(t).map(t=>t.toString());return` int getFlatIndex(ivec3 coords) { return coords.x * ${r[0]} + coords.y * ${r[1]} + coords.z; } `}function getFlatIndexFrom3DOutput(){return` int getFlatIndex(ivec3 coords) { return coords.x * outShapeStrides[0] + coords.y * outShapeStrides[1] + coords.z; } 
`}u8.registerFlag("HAS_WEBGL",()=>u8.getNumber("WEBGL_VERSION")>0),u8.registerFlag("WEBGL_VERSION",()=>isWebGLVersionEnabled(2)?2:+!!isWebGLVersionEnabled(1)),u8.registerFlag("WEBGL_CHECK_NUMERICAL_PROBLEMS",()=>!1),u8.registerFlag("WEBGL_BUFFER_SUPPORTED",()=>2===u8.get("WEBGL_VERSION")),u8.registerFlag("WEBGL_CPU_FORWARD",()=>!0),u8.registerFlag("WEBGL_FORCE_F16_TEXTURES",()=>!1),u8.registerFlag("WEBGL_PACK",()=>u8.getBool("HAS_WEBGL")),u8.registerFlag("WEBGL_PACK_NORMALIZATION",()=>u8.getBool("WEBGL_PACK")),u8.registerFlag("WEBGL_PACK_CLIP",()=>u8.getBool("WEBGL_PACK")),u8.registerFlag("WEBGL_PACK_DEPTHWISECONV",()=>u8.getBool("WEBGL_PACK")),u8.registerFlag("WEBGL_PACK_BINARY_OPERATIONS",()=>u8.getBool("WEBGL_PACK")),u8.registerFlag("WEBGL_PACK_UNARY_OPERATIONS",()=>u8.getBool("WEBGL_PACK")),u8.registerFlag("WEBGL_PACK_ARRAY_OPERATIONS",()=>u8.getBool("WEBGL_PACK")),u8.registerFlag("WEBGL_PACK_IMAGE_OPERATIONS",()=>u8.getBool("WEBGL_PACK")),u8.registerFlag("WEBGL_PACK_REDUCE",()=>u8.getBool("WEBGL_PACK")),u8.registerFlag("WEBGL_LAZILY_UNPACK",()=>u8.getBool("WEBGL_PACK")),u8.registerFlag("WEBGL_CONV_IM2COL",()=>u8.getBool("WEBGL_PACK")),u8.registerFlag("WEBGL_PACK_CONV2DTRANSPOSE",()=>u8.getBool("WEBGL_PACK")),u8.registerFlag("WEBGL_MAX_TEXTURE_SIZE",()=>getWebGLMaxTextureSize(u8.getNumber("WEBGL_VERSION"))),u8.registerFlag("WEBGL_MAX_TEXTURES_IN_SHADER",()=>getMaxTexturesInShader(u8.getNumber("WEBGL_VERSION"))),u8.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION",()=>{let t=u8.getNumber("WEBGL_VERSION");return 
0===t?0:getWebGLDisjointQueryTimerVersion(t)}),u8.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE",()=>u8.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")>0&&!isMobile()),u8.registerFlag("WEBGL_RENDER_FLOAT32_CAPABLE",()=>isCapableOfRenderingToFloatTexture(u8.getNumber("WEBGL_VERSION"))),u8.registerFlag("WEBGL_RENDER_FLOAT32_ENABLED",()=>!u8.getBool("WEBGL_FORCE_F16_TEXTURES")&&u8.getBool("WEBGL_RENDER_FLOAT32_CAPABLE")),u8.registerFlag("WEBGL_DOWNLOAD_FLOAT_ENABLED",()=>isDownloadFloatTextureEnabled(u8.getNumber("WEBGL_VERSION"))),u8.registerFlag("WEBGL_FENCE_API_ENABLED",()=>isWebGLFenceEnabled(u8.getNumber("WEBGL_VERSION"))),u8.registerFlag("WEBGL_SIZE_UPLOAD_UNIFORM",()=>4*!!u8.getBool("WEBGL_RENDER_FLOAT32_ENABLED")),u8.registerFlag("WEBGL_DELETE_TEXTURE_THRESHOLD",()=>-1,t=>{if("number"!=typeof t)throw Error(`WEBGL_DELETE_TEXTURE_THRESHOLD must be a number but got ${t}.`);if(t<0&&-1!==t)throw Error(`WEBGL_DELETE_TEXTURE_THRESHOLD must be -1 (indicating never delete) or at least 0, but got ${t}.`)}),u8.registerFlag("WEBGL_FLUSH_THRESHOLD",()=>isMobile()?1:-1,t=>{if("number"!=typeof t)throw Error(`WEBGL_FLUSH_THRESHOLD must be a number but got ${t}.`);if(t<0&&-1!==t)throw Error(`WEBGL_FLUSH_THRESHOLD must be -1 (indicating never manual flush) or at least 0, but got ${t}.`)}),u8.registerFlag("CPU_HANDOFF_SIZE_THRESHOLD",()=>128),u8.registerFlag("WEBGL_USE_SHAPES_UNIFORMS",()=>!1),u8.registerFlag("TOPK_LAST_DIM_CPU_HANDOFF_SIZE_THRESHOLD",()=>1e5),u8.registerFlag("TOPK_K_CPU_HANDOFF_THRESHOLD",()=>128),u8.registerFlag("WEBGL_EXP_CONV",()=>!1),u8.registerFlag("SOFTWARE_WEBGL_ENABLED",()=>u8.getBool("IS_TEST")),u8.registerFlag("WEBGL_MAX_SIZE_FOR_NARROW_TEXTURE",()=>1/0),u8.registerFlag("WEBGL_AUTO_SQUARIFY_NARROW_TEXTURE_SHAPE",()=>!1),u8.registerFlag("WEBGL2_ISNAN_CUSTOM",()=>!1),u8.registerFlag("ENGINE_COMPILE_ONLY",()=>!1);let u7=` const float FLOAT_MAX = 1.70141184e38; const float FLOAT_MIN = 1.17549435e-38; lowp vec4 encode_float(highp 
float v) { if (isnan(v)) { return vec4(255, 255, 255, 255); } highp float av = abs(v); if(av < FLOAT_MIN) { return vec4(0.0, 0.0, 0.0, 0.0); } else if(v > FLOAT_MAX) { return vec4(0.0, 0.0, 128.0, 127.0) / 255.0; } else if(v < -FLOAT_MAX) { return vec4(0.0, 0.0, 128.0, 255.0) / 255.0; } highp vec4 c = vec4(0,0,0,0); highp float e = floor(log2(av)); highp float m = exp2(fract(log2(av))) - 1.0; c[2] = floor(128.0 * m); m -= c[2] / 128.0; c[1] = floor(32768.0 * m); m -= c[1] / 32768.0; c[0] = floor(8388608.0 * m); highp float ebias = e + 127.0; c[3] = floor(ebias / 2.0); ebias -= c[3] * 2.0; c[2] += floor(ebias) * 128.0; c[3] += 128.0 * step(0.0, -v); return c / 255.0; } `,{getBroadcastDims:u9}=eg;function makeShader(t,r,a){let n,s,i=[];if(t.forEach(t=>{let r=sizeFromShape(t.shapeInfo.logicalShape);if(t.shapeInfo.isUniform?i.push(`uniform float ${t.name}${r>1?`[${r}]`:""};`):(i.push(`uniform sampler2D ${t.name};`),i.push(`uniform int offset${t.name};`)),a.enableShapeUniforms){let{uniformShape:r}=getUniformInfoFromShape(a.packedInputs,t.shapeInfo.logicalShape,t.shapeInfo.texShape);switch(r.length){case 1:i.push(`uniform int ${t.name}Shape;`);break;case 2:i.push(`uniform ivec2 ${t.name}Shape;`);break;case 3:i.push(`uniform ivec3 ${t.name}Shape;`);break;case 4:i.push(`uniform ivec4 ${t.name}Shape;`)}i.push(`uniform ivec2 ${t.name}TexShape;`)}}),a.enableShapeUniforms){switch(r.logicalShape.length){case 1:i.push("uniform int outShape;");break;case 2:i.push("uniform ivec2 outShape;"),i.push("uniform int outShapeStrides;");break;case 3:i.push("uniform ivec3 outShape;"),i.push("uniform ivec2 outShapeStrides;");break;case 4:i.push("uniform ivec4 outShape;"),i.push("uniform ivec3 outShapeStrides;")}i.push("uniform ivec2 outTexShape;")}a.customUniforms&&a.customUniforms.forEach(t=>{i.push(`uniform ${t.type} ${t.name}${t.arrayIndex?`[${t.arrayIndex}]`:""};`)});let o=i.join(` `),l=t.map(t=>getInputSamplingSnippet(t,r,a.packedInputs,a.enableShapeUniforms)).join(` 
`),u=r.texShape,p=getGlslDifferences(),m=getFloatTextureSampleSnippet(p),y=getShaderPrefix(p);return r.isPacked?(n=getPackedOutputSamplingSnippet(r.logicalShape,u,a.enableShapeUniforms),s=getFloatTextureSetRGBASnippet(p)):(n=getOutputSamplingSnippet(r.logicalShape,u,a.enableShapeUniforms),s=getFloatTextureSetRSnippet(p)),a.packedInputs&&(y+=pn),[y,m,s,o,n,l,a.userCode].join(` `)}function getSamplerFromInInfo(t,r=!1){let a=t.shapeInfo.logicalShape;switch(a.length){case 0:return getSamplerScalar(t,r);case 1:return getSampler1D(t,r);case 2:return getSampler2D(t,r);case 3:return getSampler3D(t,r);case 4:return getSampler4D(t,r);case 5:return getSampler5D(t);case 6:return getSampler6D(t);default:throw Error(`${a.length}-D input sampling is not yet supported`)}}function getPackedSamplerFromInInfo(t,r){switch(t.shapeInfo.logicalShape.length){case 0:return getPackedSamplerScalar(t);case 1:return getPackedSampler1D(t,r);case 2:return getPackedSampler2D(t,r);case 3:return getPackedSampler3D(t,r);default:return getPackedSamplerND(t,r)}}function getInputSamplingSnippet(t,r,a=!1,n){let s="";a?s+=getPackedSamplerFromInInfo(t,n):s+=getSamplerFromInInfo(t,n);let i=t.shapeInfo.logicalShape,o=r.logicalShape;return i.length<=o.length&&(a?s+=getPackedSamplerAtOutputCoords(t,r):s+=getSamplerAtOutputCoords(t,r)),s}function getPackedOutputSamplingSnippet(t,r,a){switch(t.length){case 0:return getOutputScalarCoords();case 1:return getOutputPacked1DCoords(t,r,a);case 2:return getOutputPacked2DCoords(t,r,a);case 3:return getOutputPacked3DCoords(t,r,a);default:return getOutputPackedNDCoords(t,r,a)}}function getOutputSamplingSnippet(t,r,a){switch(t.length){case 0:return getOutputScalarCoords();case 1:return getOutput1DCoords(t,r,a);case 2:return getOutput2DCoords(t,r,a);case 3:return getOutput3DCoords(t,r,a);case 4:return getOutput4DCoords(t,r,a);case 5:return getOutput5DCoords(t,r);case 6:return getOutput6DCoords(t,r);default:throw Error(`${t.length}-D output sampling is not yet 
supported`)}}function getFloatTextureSampleSnippet(t){return` float sampleTexture(sampler2D textureSampler, vec2 uv) { return ${t.texture2D}(textureSampler, uv).r; } `}function getFloatTextureSetRSnippet(t){return` void setOutput(float val) { ${t.output} = vec4(val, 0, 0, 0); } `}function getFloatTextureSetRGBASnippet(t){return` void setOutput(vec4 val) { ${t.output} = val; } `}function getShaderPrefix(t){return`${t.version} precision highp float; precision highp int; precision highp sampler2D; ${t.varyingFs} vec2 resultUV; ${t.defineOutput} const vec2 halfCR = vec2(0.5, 0.5); struct ivec5 { int x; int y; int z; int w; int u; }; struct ivec6 { int x; int y; int z; int w; int u; int v; }; uniform float NAN; ${t.defineSpecialNaN} ${t.defineSpecialInf} ${t.defineRound} int imod(int x, int y) { return x - y * (x / y); } int idiv(int a, int b, float sign) { int res = a / b; int mod = imod(a, b); if (sign < 0. && mod != 0) { res -= 1; } return res; } //Based on the work of Dave Hoskins //https://www.shadertoy.com/view/4djSRW #define HASHSCALE1 443.8975 float random(float seed){ vec2 p = resultUV * seed; vec3 p3 = fract(vec3(p.xyx) * HASHSCALE1); p3 += dot(p3, p3.yzx + 19.19); return fract((p3.x + p3.y) * p3.z); } ${pe} ${pt} ${pr} `}let pe=` vec2 uvFromFlat(int texNumR, int texNumC, int index) { int texR = index / texNumC; int texC = index - texR * texNumC; return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); } vec2 packedUVfrom1D(int texNumR, int texNumC, int index) { int texelIndex = index / 2; int texR = texelIndex / texNumC; int texC = texelIndex - texR * texNumC; return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); } `,pt=` vec2 packedUVfrom2D(int texelsInLogicalRow, int texNumR, int texNumC, int row, int col) { int texelIndex = (row / 2) * texelsInLogicalRow + (col / 2); int texR = texelIndex / texNumC; int texC = texelIndex - texR * texNumC; return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); } `,pr=` vec2 packedUVfrom3D(int texNumR, int 
texNumC, int texelsInBatch, int texelsInLogicalRow, int b, int row, int col) { int index = b * texelsInBatch + (row / 2) * texelsInLogicalRow + (col / 2); int texR = index / texNumC; int texC = index - texR * texNumC; return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); } `,pn=` float getChannel(vec4 frag, vec2 innerDims) { vec2 modCoord = mod(innerDims, 2.); return modCoord.x == 0. ? (modCoord.y == 0. ? frag.r : frag.g) : (modCoord.y == 0. ? frag.b : frag.a); } float getChannel(vec4 frag, int dim) { float modCoord = mod(float(dim), 2.); return modCoord == 0. ? frag.r : frag.g; } `;function getOutputScalarCoords(){return` int getOutputCoords() { return 0; } `}function getOutputPacked1DCoords(t,r,a){let n=[Math.ceil(r[0]/2),Math.ceil(r[1]/2)];return 1===n[0]?a?` int getOutputCoords() { return 2 * int(resultUV.x * ceil(float(outTexShape[1]) / 2.0)); } `:` int getOutputCoords() { return 2 * int(resultUV.x * ${n[1]}.0); } `:1===n[1]?a?` int getOutputCoords() { return 2 * int(resultUV.y * ceil(float(outTexShape[0]) / 2.0)); } `:` int getOutputCoords() { return 2 * int(resultUV.y * ${n[0]}.0); } `:a?` int getOutputCoords() { ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0)); ivec2 resTexRC = ivec2(resultUV.yx * vec2(packedTexShape[0], packedTexShape[1])); return 2 * (resTexRC.x * packedTexShape[1] + resTexRC.y); } `:` int getOutputCoords() { ivec2 resTexRC = ivec2(resultUV.yx * vec2(${n[0]}, ${n[1]})); return 2 * (resTexRC.x * ${n[1]} + resTexRC.y); } `}function getOutput1DCoords(t,r,a){return 1===r[0]?a?` int getOutputCoords() { return int(resultUV.x * float(outTexShape[1])); } `:` int getOutputCoords() { return int(resultUV.x * ${r[1]}.0); } `:1===r[1]?a?` int getOutputCoords() { return int(resultUV.y * float(outTexShape[0])); } `:` int getOutputCoords() { return int(resultUV.y * ${r[0]}.0); } `:a?` int getOutputCoords() { ivec2 resTexRC = ivec2(resultUV.yx * vec2(outTexShape[0], outTexShape[1])); return 
resTexRC.x * outTexShape[1] + resTexRC.y; } `:` int getOutputCoords() { ivec2 resTexRC = ivec2(resultUV.yx * vec2(${r[0]}, ${r[1]})); return resTexRC.x * ${r[1]} + resTexRC.y; } `}function getOutputPacked3DCoords(t,r,a){if(a)return` ivec3 getOutputCoords() { ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0)); int texelsInLogicalRow = int(ceil(float(outShape[2]) / 2.0)); int texelsInBatch = texelsInLogicalRow * int(ceil(float(outShape[1]) / 2.0)); ivec2 resTexRC = ivec2(resultUV.yx * vec2(packedTexShape[0], packedTexShape[1])); int index = resTexRC.x * packedTexShape[1] + resTexRC.y; int b = index / texelsInBatch; index -= b * texelsInBatch; int r = 2 * (index / texelsInLogicalRow); int c = imod(index, texelsInLogicalRow) * 2; return ivec3(b, r, c); } `;let n=[Math.ceil(r[0]/2),Math.ceil(r[1]/2)],s=Math.ceil(t[2]/2),i=s*Math.ceil(t[1]/2);return` ivec3 getOutputCoords() { ivec2 resTexRC = ivec2(resultUV.yx * vec2(${n[0]}, ${n[1]})); int index = resTexRC.x * ${n[1]} + resTexRC.y; int b = index / ${i}; index -= b * ${i}; int r = 2 * (index / ${s}); int c = imod(index, ${s}) * 2; return ivec3(b, r, c); } `}function getOutput3DCoords(t,r,a){if(a){let r=getOutputLogicalCoordinatesFromFlatIndexByUniform(["r","c","d"],t);return` ivec3 getOutputCoords() { ivec2 resTexRC = ivec2(resultUV.yx * vec2(outTexShape[0], outTexShape[1])); int index = resTexRC.x * outTexShape[1] + resTexRC.y; ${r} return ivec3(r, c, d); } `}let n=getLogicalCoordinatesFromFlatIndex(["r","c","d"],t);return` ivec3 getOutputCoords() { ivec2 resTexRC = ivec2(resultUV.yx * vec2(${r[0]}, ${r[1]})); int index = resTexRC.x * ${r[1]} + resTexRC.y; ${n} return ivec3(r, c, d); } `}function getOutputPackedNDCoords(t,r,a){if(a)return` ivec4 getOutputCoords() { ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0)); ivec2 resTexRC = ivec2(resultUV.yx * vec2(packedTexShape[0], packedTexShape[1])); int index = resTexRC.x * 
packedTexShape[1] + resTexRC.y; int texelsInLogicalRow = int(ceil(float(outShape[3]) / 2.0)); int texelsInBatch = texelsInLogicalRow * int(ceil(float(outShape[2]) / 2.0)); int texelsInBatchN = texelsInBatch * outShape[1]; int b2 = index / texelsInBatchN; index -= b2 * texelsInBatchN; int b = index / texelsInBatch; index -= b * texelsInBatch; int r = 2 * (index / texelsInLogicalRow); int c = imod(index, texelsInLogicalRow) * 2; return ivec4(b2, b, r, c); } `;let n=[Math.ceil(r[0]/2),Math.ceil(r[1]/2)],s=Math.ceil(t[t.length-1]/2),i=s*Math.ceil(t[t.length-2]/2),o=i,l="",u="b, r, c";for(let r=2;r=1?"coords = 0;":l.map(t=>`coords.${m[t+p]} = 0;`).join(` `);let y="";y=o<2&&i>0?"coords":t.shapeInfo.logicalShape.map((t,r)=>`coords.${m[r+p]}`).join(", ");let _="return outputValue;",w=1===sizeFromShape(t.shapeInfo.logicalShape),I=1===sizeFromShape(r.logicalShape);if(1!==i||w||I){if(w&&!I)_=1===o?` return vec4(outputValue.x, outputValue.x, 0., 0.); `:` return vec4(outputValue.x); `;else if(l.length){let t=i-2,r=i-1;l.indexOf(t)>-1&&l.indexOf(r)>-1?_="return vec4(outputValue.x);":l.indexOf(t)>-1?_="return vec4(outputValue.x, outputValue.y, outputValue.x, outputValue.y);":l.indexOf(r)>-1&&(_="return vec4(outputValue.xx, outputValue.zz);")}}else _=` return vec4(outputValue.xy, outputValue.xy); `;return` vec4 ${"get"+s+"AtOutCoords"}() { ${u} coords = getOutputCoords(); ${a} vec4 outputValue = get${s}(${y}); ${_} } `}function getSamplerAtOutputCoords(t,r){let a,n=t.name,s=n.charAt(0).toUpperCase()+n.slice(1),i="get"+s+"AtOutCoords",o=r.texShape,l=t.shapeInfo.texShape,u=t.shapeInfo.logicalShape.length,p=r.logicalShape.length;if(!t.shapeInfo.isUniform&&u===p&&null==t.shapeInfo.flatOffset&&arraysEqual(l,o))return` float ${i}() { return sampleTexture(${n}, resultUV); } `;let m=getCoordsDataType(p),y=u9(t.shapeInfo.logicalShape,r.logicalShape),_=p-u,w=["x","y","z","w","u","v"];a=0===u?"":p<2&&y.length>=1?"coords = 0;":y.map(t=>`coords.${w[t+_]} = 0;`).join(` `);let I="";return 
I=p<2&&u>0?"coords":t.shapeInfo.logicalShape.map((t,r)=>`coords.${w[r+_]}`).join(", "),` float ${i}() { ${m} coords = getOutputCoords(); ${a} return get${s}(${I}); } `}function getCoordsDataType(t){if(t<=1)return"int";if(2===t)return"ivec2";if(3===t)return"ivec3";if(4===t)return"ivec4";if(5===t)return"ivec5";else if(6===t)return"ivec6";else throw Error(`GPU for rank ${t} is not yet supported`)}function getUniformInfoFromShape(t,r,a){let{newShape:n,keptDims:s}=util_base_squeezeShape(r),i=r.length,o=t&&3===i&&1===r[0],l=o?r.slice(1):n,u=!t&&i>1&&!arraysEqual(r,a)&&n.lengtht[r]).join(", ")}function compileProgram(t,r,a,n){let s=a.map((t,a)=>{let n={logicalShape:t.shape,texShape:t.isUniform?null:t.texData.texShape,isUniform:t.isUniform,isPacked:!t.isUniform&&t.texData.isPacked,flatOffset:null};return null!=t.texData&&null!=t.texData.slice&&t.texData.slice.flatOffset>0&&(n.flatOffset=t.texData.slice.flatOffset),{name:r.variableNames[a],shapeInfo:n}}),i=s.map(t=>t.shapeInfo),o={logicalShape:n.shape,texShape:n.texData.texShape,isUniform:!1,isPacked:n.texData.isPacked,flatOffset:null},l=makeShader(s,o,r),u=createFragmentShader(t.gl,l),p=t.createProgram(u);return eV.get("ENGINE_COMPILE_ONLY")?{program:r,fragmentShader:u,source:l,webGLProgram:p,inShapeInfos:i,outShapeInfo:o,variablesLocations:null,customUniformLocations:null,infLoc:null,nanLoc:null,outShapeLocation:null,outShapeStridesLocation:null,outTexShapeLocation:null}:(t.buildVao(p),Object.assign({program:r,fragmentShader:u,source:l,webGLProgram:p,inShapeInfos:i,outShapeInfo:o},getUniformLocations(t,r,p)))}function getUniformLocations(t,r,a){let n,s,i,o=[],l=[],u=null,p=null;for(let n of(p=t.getUniformLocation(a,"NAN",!1),1===eV.getNumber("WEBGL_VERSION")&&(u=t.getUniformLocation(a,"INFINITY",!1)),r.variableNames)){let 
s={name:n,uniform:t.getUniformLocation(a,n,!1),offset:t.getUniformLocation(a,`offset${n}`,!1)};r.enableShapeUniforms&&(s.shape=t.getUniformLocation(a,`${n}Shape`,!1),s.texShape=t.getUniformLocation(a,`${n}TexShape`,!1)),o.push(s)}if(r.enableShapeUniforms&&(n=t.getUniformLocation(a,"outShape",!1),i=t.getUniformLocation(a,"outShapeStrides",!1),s=t.getUniformLocation(a,"outTexShape",!1)),r.customUniforms)for(let n of r.customUniforms)l.push(t.getUniformLocation(a,n.name,!1));return{variablesLocations:o,customUniformLocations:l,infLoc:u,nanLoc:p,outShapeLocation:n,outShapeStridesLocation:i,outTexShapeLocation:s}}function validateBinaryAndProgram(t,r){if(t.length!==r.length)throw Error(`Binary was compiled with ${t.length} inputs, but was executed with ${r.length} inputs`);t.forEach((t,a)=>{let n=t.logicalShape,s=r[a],i=s.shape;if(!arraysEqual(n,i))throw Error(`Binary was compiled with different shapes than the current args. Shapes ${n} and ${i} must match`);if(t.isUniform&&s.isUniform)return;let o=t.texShape,l=s.isUniform?null:s.texData.texShape;if(!arraysEqual(o,l))throw Error(`Binary was compiled with different texture shapes than the current args. 
Shape ${o} and ${l} must match`)})}function runProgram(t,r,a,n,s){r.program.enableShapeUniforms||(validateBinaryAndProgram(r.inShapeInfos,a),validateBinaryAndProgram([r.outShapeInfo],[n]));let i=n.texData.texture,o=n.texData.texShape;n.texData.isPacked?t.setOutputPackedMatrixTexture(i.texture,o[0],o[1]):t.setOutputMatrixTexture(i.texture,o[0],o[1]),t.setProgram(r.webGLProgram),t.bindVertexArray(r.webGLProgram.vao),1===eV.getNumber("WEBGL_VERSION")&&null!==r.infLoc&&t.gl.uniform1f(r.infLoc,1/0),null!==r.nanLoc&&t.gl.uniform1f(r.nanLoc,NaN);for(let n=0;nsizeFromShape(s.shape))t.gl.uniform1f(i,s.uniformValues[0]);else{let r=s.uniformValues;r instanceof Float32Array||(r=new Float32Array(r)),t.gl.uniform1fv(i,r)}continue}null!=s.texData.slice&&null!=o&&t.gl.uniform1i(o,s.texData.slice.flatOffset),t.setInputMatrixTexture(s.texData.texture.texture,i,n)}}let l=r.outShapeLocation;if(l)switch(n.shape.length){case 1:t.gl.uniform1iv(l,new Int32Array(n.shape));break;case 2:t.gl.uniform2iv(l,new Int32Array(n.shape));break;case 3:t.gl.uniform3iv(l,new Int32Array(n.shape));break;case 4:t.gl.uniform4iv(l,new Int32Array(n.shape))}if(r.outShapeStridesLocation){let a=computeStrides(n.shape);switch(n.shape.length){case 2:t.gl.uniform1iv(r.outShapeStridesLocation,new Int32Array(a));break;case 3:t.gl.uniform2iv(r.outShapeStridesLocation,new Int32Array(a));break;case 4:t.gl.uniform3iv(r.outShapeStridesLocation,new Int32Array(a))}}if(r.outTexShapeLocation&&t.gl.uniform2i(r.outTexShapeLocation,n.texData.texShape[0],n.texData.texShape[1]),r.program.customUniforms&&s)for(let a=0;a{let s=null!=r.texData&&null!=r.texData.slice&&r.texData.slice.flatOffset>0;if(t.enableShapeUniforms&&!r.isUniform){let i=r.texData.texShape,{useSqueezeShape:o,uniformShape:l,keptDims:u}=getUniformInfoFromShape(t.packedInputs,r.shape,i),p="",m="",y="";if(1===l.length&&t.packedInputs){let t=[Math.ceil(i[0]/2),Math.ceil(i[1]/2)];p=`${t[0]>1}_${t[1]>1}`}else 
if(2!==l.length||t.packedInputs){if(l.length>2&&!t.packedInputs){let t=computeStrides(l);y=`${t[0]===i[1]}_${t[t.length-1]===i[1]}`}}else m=`${l[0]>1}_${l[1]>1}`;let _=r.shape.length,w=2===l.length&&arraysEqual(r.shape,i),I=1===sizeFromShape(r.shape),C=getBroadcastDims(r.shape,a.shape),E=!t.packedInputs&&_===a.shape.length&&arraysEqual(i,a.texData.texShape),A=t.packedInputs||l.length>2?"":`${i[0]>1}_${i[1]>1}`;n+=`${_}_${E}_${o?u:""}_${l.length}_${I}_${C}_${w}_${p}_${m}_${y}_${A}_${s}`}else{let t=r.isUniform?"uniform":r.texData.texShape;n+=`${r.shape}_${t}_${s}`}});let s=t.userCode,i=t.constructor.name;return i+("_"+n+"_"+s+`${eV.getNumber("WEBGL_VERSION")}`)}function useShapeUniforms(t){return eV.getBool("WEBGL_USE_SHAPES_UNIFORMS")&&t<=4}let DecodeMatrixProgram=class DecodeMatrixProgram{constructor(t){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0,this.outPackingScheme=eo.DENSE,this.customUniforms=[{name:"texShape",type:"ivec2"}];const r=getGlslDifferences();this.outputShape=t,this.enableShapeUniforms=useShapeUniforms(this.outputShape.length),this.userCode=` ivec3 outCoordsFromFlatIndex(int index) { ${this.enableShapeUniforms?getOutputLogicalCoordinatesFromFlatIndexByUniform(["r","c","d"],t):getLogicalCoordinatesFromFlatIndex(["r","c","d"],t)} return ivec3(r, c, d); } void main() { ivec2 resTexRC = ivec2(resultUV.yx * vec2(texShape[0], texShape[1])); int index = 4 * (resTexRC.x * texShape[1] + resTexRC.y); vec4 result = vec4(0.); for (int i=0; i<4; i++) { int flatIndex = index + i; ivec3 rc = outCoordsFromFlatIndex(flatIndex); result[i] = getA(rc.x, rc.y, rc.z); } ${r.output} = result; } `}};let DecodeMatrixPackedProgram=class DecodeMatrixPackedProgram{constructor(t){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outPackingScheme=eo.DENSE,this.customUniforms=[{name:"texShape",type:"ivec2"}];const r=getGlslDifferences();this.outputShape=t,this.enableShapeUniforms=useShapeUniforms(this.outputShape.length),this.userCode=` 
ivec3 outCoordsFromFlatIndex(int index) { ${this.enableShapeUniforms?getOutputLogicalCoordinatesFromFlatIndexByUniform(["r","c","d"],t):getLogicalCoordinatesFromFlatIndex(["r","c","d"],t)} return ivec3(r, c, d); } void main() { ivec2 resTexRC = ivec2(resultUV.yx * vec2(texShape[0], texShape[1])); int index = 4 * (resTexRC.x * texShape[1] + resTexRC.y); vec4 result = vec4(0.); for (int i=0; i<4; i++) { int flatIndex = index + i; ivec3 rc = outCoordsFromFlatIndex(flatIndex); result[i] = getChannel(getA(rc.x, rc.y, rc.z), vec2(rc.y, rc.z)); } ${r.output} = result; } `}};let EncodeFloatProgram=class EncodeFloatProgram{constructor(t){this.variableNames=["A"],this.outTexUsage=el.DOWNLOAD;const r=getGlslDifferences();this.outputShape=t,this.userCode=` ${u7} void main() { float x = getAAtOutCoords(); ${r.output} = encode_float(x); } `}};let EncodeFloatPackedProgram=class EncodeFloatPackedProgram{constructor(t){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!1,this.outTexUsage=el.DOWNLOAD;const r=getGlslDifferences();this.outputShape=t,this.userCode=` ${u7} void main() { ivec3 coords = getOutputCoords(); float x = getChannel(getAAtOutCoords(), vec2(coords.y, coords.z)); ${r.output} = encode_float(x); } `}};let ps={R:0,G:1,B:2,A:3};let EncodeMatrixProgram=class EncodeMatrixProgram{constructor(t,r=!1,a="RGBA"){this.variableNames=["A"],this.customUniforms=[{name:"texShape",type:"ivec2"}];const n=getGlslDifferences();this.outputShape=t,this.enableShapeUniforms=useShapeUniforms(this.outputShape.length);let s="result";r&&(s="floor(result * 255. 
+ 0.5)");let i="";for(let t=0;tt.bindTexture(l,o)),callAndCheck(t,()=>t.texParameteri(l,t.TEXTURE_WRAP_S,t.CLAMP_TO_EDGE)),callAndCheck(t,()=>t.texParameteri(l,t.TEXTURE_WRAP_T,t.CLAMP_TO_EDGE)),callAndCheck(t,()=>t.texParameteri(l,t.TEXTURE_MIN_FILTER,t.NEAREST)),callAndCheck(t,()=>t.texParameteri(l,t.TEXTURE_MAG_FILTER,t.NEAREST)),1===eV.getNumber("WEBGL_VERSION")?callAndCheck(t,()=>t.texImage2D(l,0,n,r,a,0,s,i,null)):callAndCheck(t,()=>t.texStorage2D(l,1,n,r,a)),callAndCheck(t,()=>t.bindTexture(t.TEXTURE_2D,null)),{texture:o,texShape:[a,r]}}function getInternalFormatForFloat32MatrixTexture(t){return t.internalFormatFloat}function createFloat32MatrixTexture(t,r,a,n){let[s,i]=[a,r];return createAndConfigureTexture(t,s,i,getInternalFormatForFloat32MatrixTexture(n),n.textureFormatFloat,t.FLOAT)}function getInternalFormatForFloat16MatrixTexture(t){return t.internalFormatHalfFloat}function createFloat16MatrixTexture(t,r,a,n){let[s,i]=[a,r];return createAndConfigureTexture(t,s,i,getInternalFormatForFloat16MatrixTexture(n),n.textureFormatFloat,n.textureTypeHalfFloat)}function getInternalFormatForUnsignedBytesMatrixTexture(t){return t.downloadTextureFormat}function createUnsignedBytesMatrixTexture(t,r,a,n){let[s,i]=[a,r];return createAndConfigureTexture(t,s,i,getInternalFormatForUnsignedBytesMatrixTexture(n),t.RGBA,t.UNSIGNED_BYTE)}function getInternalFormatForPackedMatrixTexture(t){return t.internalFormatPackedFloat}function createPackedMatrixTexture(t,r,a,n){let[s,i]=getPackedMatrixTextureShapeWidthHeight(r,a);return createAndConfigureTexture(t,s,i,getInternalFormatForPackedMatrixTexture(n),t.RGBA,t.FLOAT)}function getInternalFormatForFloat16PackedMatrixTexture(t){return t.internalFormatPackedHalfFloat}function createFloat16PackedMatrixTexture(t,r,a,n){let[s,i]=getPackedMatrixTextureShapeWidthHeight(r,a);return createAndConfigureTexture(t,s,i,getInternalFormatForFloat16PackedMatrixTexture(n),t.RGBA,n.textureTypeHalfFloat)}function 
bindVertexProgramAttributeStreams(t,r,a){return callAndCheck(t,()=>t.bindBuffer(t.ARRAY_BUFFER,a)),bindVertexBufferToProgramAttribute(t,r,"clipSpacePos",a,3,20,0)&&bindVertexBufferToProgramAttribute(t,r,"uv",a,2,20,12)}function uploadDenseMatrixToTexture(t,r,a,n,s,i){let o,l,u;callAndCheck(t,()=>t.bindTexture(t.TEXTURE_2D,r)),s instanceof Uint8Array?(o=new Uint8Array(a*n*4),l=t.UNSIGNED_BYTE,u=t.RGBA):(o=new Float32Array(a*n*4),l=t.FLOAT,u=i.internalFormatPackedFloat),o.set(s),2===eV.getNumber("WEBGL_VERSION")?callAndCheck(t,()=>t.texSubImage2D(t.TEXTURE_2D,0,0,0,a,n,t.RGBA,l,o)):callAndCheck(t,()=>t.texImage2D(t.TEXTURE_2D,0,u,a,n,0,t.RGBA,l,o)),callAndCheck(t,()=>t.bindTexture(t.TEXTURE_2D,null))}function uploadPixelDataToTexture(t,r,a){callAndCheck(t,()=>t.bindTexture(t.TEXTURE_2D,r)),a.data instanceof Uint8Array?2===eV.getNumber("WEBGL_VERSION")?callAndCheck(t,()=>t.texSubImage2D(t.TEXTURE_2D,0,0,0,a.width,a.height,t.RGBA,t.UNSIGNED_BYTE,a.data)):callAndCheck(t,()=>t.texImage2D(t.TEXTURE_2D,0,t.RGBA,a.width,a.height,0,t.RGBA,t.UNSIGNED_BYTE,a.data)):2===eV.getNumber("WEBGL_VERSION")?callAndCheck(t,()=>t.texSubImage2D(t.TEXTURE_2D,0,0,0,t.RGBA,t.UNSIGNED_BYTE,a)):callAndCheck(t,()=>t.texImage2D(t.TEXTURE_2D,0,t.RGBA,t.RGBA,t.UNSIGNED_BYTE,a)),callAndCheck(t,()=>t.bindTexture(t.TEXTURE_2D,null))}function createBufferFromOutputTexture(t,r,a,n){let s=t.createBuffer();callAndCheck(t,()=>t.bindBuffer(t.PIXEL_PACK_BUFFER,s));let i=16*r*a;return callAndCheck(t,()=>t.bufferData(t.PIXEL_PACK_BUFFER,i,t.STREAM_READ)),callAndCheck(t,()=>t.readPixels(0,0,a,r,t.RGBA,t.FLOAT,0)),callAndCheck(t,()=>t.bindBuffer(t.PIXEL_PACK_BUFFER,null)),s}function downloadFloat32MatrixFromBuffer(t,r,a){let n=new Float32Array(a);return t.bindBuffer(t.PIXEL_PACK_BUFFER,r),t.getBufferSubData(t.PIXEL_PACK_BUFFER,0,n),t.bindBuffer(t.PIXEL_PACK_BUFFER,null),n}function downloadByteEncodedFloatMatrixFromOutputTexture(t,r,a,n){let[s,i]=[a,r],o=new 
Uint8Array(getUnpackedArraySizeFromMatrixSize(r*a,4));return callAndCheck(t,()=>t.readPixels(0,0,s,i,n.downloadTextureFormat,t.UNSIGNED_BYTE,o)),new Float32Array(o.buffer)}function downloadPackedMatrixFromBuffer(t,r,a,n,s,i,o,l){let u=new Float32Array(getPackedRGBAArraySizeFromMatrixShape(i,o));return t.bindBuffer(t.PIXEL_PACK_BUFFER,r),t.getBufferSubData(t.PIXEL_PACK_BUFFER,0,u),t.bindBuffer(t.PIXEL_PACK_BUFFER,null),u}function downloadMatrixFromPackedOutputTexture(t,r,a){let n=new Float32Array(r*a*4);return callAndCheck(t,()=>t.readPixels(0,0,a,r,t.RGBA,t.FLOAT,n)),n}let GPGPUContext=class GPGPUContext{constructor(t){this.outputTexture=null,this.program=null,this.disposed=!1,this.itemsToPoll=[];const r=eV.getNumber("WEBGL_VERSION");if(null!=t?(this.gl=t,setWebGLContext(r,t)):this.gl=getWebGLContext(r),t=this.gl,2===eV.getNumber("WEBGL_VERSION")){const r=t;this.createVertexArray=()=>callAndCheck(r,()=>r.createVertexArray()),this.bindVertexArray=t=>callAndCheck(r,()=>r.bindVertexArray(t)),this.deleteVertexArray=t=>callAndCheck(r,()=>r.deleteVertexArray(t)),this.getVertexArray=()=>callAndCheck(r,()=>r.getParameter(r.VERTEX_ARRAY_BINDING))}else if(null!=t){const r=t.getExtension("OES_vertex_array_object");if(null==r)throw Error("All WebGL1 implementations are expected to offer OES_vertex_array_object.");this.createVertexArray=()=>callAndCheck(t,()=>r.createVertexArrayOES()),this.bindVertexArray=a=>callAndCheck(t,()=>r.bindVertexArrayOES(a)),this.deleteVertexArray=a=>callAndCheck(t,()=>r.deleteVertexArrayOES(a)),this.getVertexArray=()=>callAndCheck(t,()=>t.getParameter(r.VERTEX_ARRAY_BINDING_OES))}let a="WEBGL_color_buffer_float";const n="EXT_color_buffer_half_float";if(this.parallelCompilationExtension=this.gl.getExtension("KHR_parallel_shader_compile"),1===eV.getNumber("WEBGL_VERSION")){const 
t="OES_texture_half_float";if(this.textureFloatExtension=getExtensionOrThrow(this.gl,"OES_texture_float"),hasExtension(this.gl,t))this.textureHalfFloatExtension=getExtensionOrThrow(this.gl,t);else if(eV.get("WEBGL_FORCE_F16_TEXTURES"))throw Error("GL context does not support half float textures, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.");if(this.colorBufferFloatExtension=this.gl.getExtension(a),hasExtension(this.gl,n))this.colorBufferHalfFloatExtension=getExtensionOrThrow(this.gl,n);else if(eV.get("WEBGL_FORCE_F16_TEXTURES"))throw Error("GL context does not support color renderable half floats, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.")}else if(a="EXT_color_buffer_float",hasExtension(this.gl,a))this.colorBufferFloatExtension=this.gl.getExtension(a);else if(hasExtension(this.gl,n))this.colorBufferHalfFloatExtension=this.gl.getExtension(n);else throw Error("GL context does not support color renderable floats");this.vertexBuffer=createVertexBuffer(this.gl),this.indexBuffer=createIndexBuffer(this.gl),this.framebuffer=createFramebuffer(this.gl),this.textureConfig=getTextureConfig(this.gl,this.textureHalfFloatExtension)}get debug(){return eV.getBool("DEBUG")}dispose(){if(this.disposed)return;null!=this.program&&console.warn("Disposing a GPGPUContext that still has a bound WebGLProgram. This is probably a resource leak, delete the program with GPGPUContext.deleteProgram before disposing."),null!=this.outputTexture&&console.warn("Disposing a GPGPUContext that still has a bound output matrix texture. 
This is probably a resource leak, delete the output matrix texture with GPGPUContext.deleteMatrixTexture before disposing.");let t=this.gl;callAndCheck(t,()=>t.finish()),callAndCheck(t,()=>t.bindFramebuffer(t.FRAMEBUFFER,null)),callAndCheck(t,()=>t.deleteFramebuffer(this.framebuffer)),callAndCheck(t,()=>t.bindBuffer(t.ARRAY_BUFFER,null)),callAndCheck(t,()=>t.bindBuffer(t.ELEMENT_ARRAY_BUFFER,null)),callAndCheck(t,()=>t.deleteBuffer(this.indexBuffer)),this.disposed=!0}createFloat32MatrixTexture(t,r){return this.throwIfDisposed(),createFloat32MatrixTexture(this.gl,t,r,this.textureConfig)}createFloat16MatrixTexture(t,r){return this.throwIfDisposed(),createFloat16MatrixTexture(this.gl,t,r,this.textureConfig)}createUnsignedBytesMatrixTexture(t,r){return this.throwIfDisposed(),createUnsignedBytesMatrixTexture(this.gl,t,r,this.textureConfig)}uploadPixelDataToTexture(t,r){this.throwIfDisposed(),uploadPixelDataToTexture(this.gl,t,r)}uploadDenseMatrixToTexture(t,r,a,n){this.throwIfDisposed(),uploadDenseMatrixToTexture(this.gl,t,r,a,n,this.textureConfig)}createFloat16PackedMatrixTexture(t,r){return this.throwIfDisposed(),createFloat16PackedMatrixTexture(this.gl,t,r,this.textureConfig)}createPackedMatrixTexture(t,r){return this.throwIfDisposed(),createPackedMatrixTexture(this.gl,t,r,this.textureConfig)}deleteMatrixTexture(t){this.throwIfDisposed(),this.outputTexture===t&&(unbindColorTextureFromFramebuffer(this.gl,this.framebuffer),this.outputTexture=null),callAndCheck(this.gl,()=>this.gl.deleteTexture(t))}downloadByteEncodedFloatMatrixFromOutputTexture(t,r,a){return this.downloadMatrixDriver(t,()=>downloadByteEncodedFloatMatrixFromOutputTexture(this.gl,r,a,this.textureConfig))}downloadPackedMatrixFromBuffer(t,r,a,n,s,i){return downloadPackedMatrixFromBuffer(this.gl,t,r,a,n,s,i,this.textureConfig)}downloadFloat32MatrixFromBuffer(t,r){return downloadFloat32MatrixFromBuffer(this.gl,t,r)}createBufferFromTexture(t,r,a){this.bindTextureToFrameBuffer(t);let 
n=createBufferFromOutputTexture(this.gl,r,a,this.textureConfig);return this.unbindTextureToFrameBuffer(),n}createAndWaitForFence(){let t=this.createFence(this.gl);return this.pollFence(t)}createFence(t){let r,a;if(eV.getBool("WEBGL_FENCE_API_ENABLED")){let n=t.fenceSync(t.SYNC_GPU_COMMANDS_COMPLETE,0);t.flush(),a=()=>{let r=t.clientWaitSync(n,0,0);return r===t.ALREADY_SIGNALED||r===t.CONDITION_SATISFIED},r=n}else eV.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")>0?(r=this.beginQuery(),this.endQuery(),a=()=>this.isQueryAvailable(r,eV.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))):a=()=>!0;return{query:r,isFencePassed:a}}downloadMatrixFromPackedTexture(t,r,a){return this.downloadMatrixDriver(t,()=>downloadMatrixFromPackedOutputTexture(this.gl,r,a))}createProgram(t){this.throwIfDisposed();let r=this.gl;null==this.vertexShader&&(this.vertexShader=gpgpu_util_createVertexShader(r));let a=createProgram(r);callAndCheck(r,()=>r.attachShader(a,this.vertexShader)),callAndCheck(r,()=>r.attachShader(a,t)),linkProgram(r,a);let n=Object.assign(a,{vao:this.createVertexArray()});return this.debug&&validateProgram(r,n),n}buildVao(t){this.setProgram(t),this.bindVertexArray(t.vao);let r=this.gl;callAndCheck(r,()=>r.bindBuffer(r.ELEMENT_ARRAY_BUFFER,this.indexBuffer)),bindVertexProgramAttributeStreams(r,t,this.vertexBuffer)}deleteProgram(t){this.throwIfDisposed(),t===this.program&&(this.program=null),null!=t&&(callAndCheck(this.gl,()=>this.gl.deleteProgram(t)),this.deleteVertexArray(t.vao))}setProgram(t){this.throwIfDisposed(),this.program=t,null!=this.program&&this.debug&&validateProgram(this.gl,this.program),callAndCheck(this.gl,()=>this.gl.useProgram(t))}getUniformLocation(t,r,a=!0){return(this.throwIfDisposed(),a)?getProgramUniformLocationOrThrow(this.gl,t,r):getProgramUniformLocation(this.gl,t,r)}getAttributeLocation(t,r){return this.throwIfDisposed(),callAndCheck(this.gl,()=>this.gl.getAttribLocation(t,r))}getUniformLocationNoThrow(t,r){return 
this.throwIfDisposed(),this.gl.getUniformLocation(t,r)}setInputMatrixTexture(t,r,a){this.throwIfDisposed(),this.throwIfNoProgram(),bindTextureToProgramUniformSampler(this.gl,t,r,a)}setOutputMatrixTexture(t,r,a){this.setOutputMatrixTextureDriver(t,a,r)}setOutputPackedMatrixTexture(t,r,a){this.throwIfDisposed();let[n,s]=getPackedMatrixTextureShapeWidthHeight(r,a);this.setOutputMatrixTextureDriver(t,n,s)}setOutputMatrixWriteRegion(t,r,a,n){this.setOutputMatrixWriteRegionDriver(a,t,n,r)}setOutputPackedMatrixWriteRegion(t,r,a,n){throw Error("setOutputPackedMatrixWriteRegion not implemented.")}debugValidate(){null!=this.program&&validateProgram(this.gl,this.program),validateFramebuffer(this.gl)}executeProgram(){this.throwIfDisposed(),this.throwIfNoProgram();let t=this.gl;this.debug&&(console.assert(this.getVertexArray()===this.program.vao,"VAO changed between setProgram and executeProgram!"),this.debugValidate()),callAndCheck(t,()=>t.drawElements(t.TRIANGLES,6,t.UNSIGNED_SHORT,0))}blockUntilAllProgramsCompleted(){this.throwIfDisposed(),callAndCheck(this.gl,()=>this.gl.finish())}getQueryTimerExtension(){return null==this.disjointQueryTimerExtension&&(this.disjointQueryTimerExtension=getExtensionOrThrow(this.gl,2===eV.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")?"EXT_disjoint_timer_query_webgl2":"EXT_disjoint_timer_query")),this.disjointQueryTimerExtension}getQueryTimerExtensionWebGL2(){return this.getQueryTimerExtension()}getQueryTimerExtensionWebGL1(){return this.getQueryTimerExtension()}beginQuery(){if(2===eV.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")){let t=this.gl,r=this.getQueryTimerExtensionWebGL2(),a=t.createQuery();return t.beginQuery(r.TIME_ELAPSED_EXT,a),a}let t=this.getQueryTimerExtensionWebGL1(),r=t.createQueryEXT();return t.beginQueryEXT(t.TIME_ELAPSED_EXT,r),r}endQuery(){if(2===eV.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")){let t=this.gl,r=this.getQueryTimerExtensionWebGL2();t.endQuery(r.TIME_ELAPSED_EXT);return}let 
t=this.getQueryTimerExtensionWebGL1();t.endQueryEXT(t.TIME_ELAPSED_EXT)}async waitForQueryAndGetTime(t){return await repeatedTry(()=>this.disposed||this.isQueryAvailable(t,eV.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))),this.getQueryTime(t,eV.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))}getQueryTime(t,r){if(0===r)return null;if(2===r){let r=this.gl;return r.getQueryParameter(t,r.QUERY_RESULT)/1e6}{let r=this.getQueryTimerExtensionWebGL1();return r.getQueryObjectEXT(t,r.QUERY_RESULT_EXT)/1e6}}isQueryAvailable(t,r){if(0===r)return!0;if(2===r){let r=this.gl,a=this.getQueryTimerExtensionWebGL2(),n=r.getQueryParameter(t,r.QUERY_RESULT_AVAILABLE);return null==this.disjoint&&(this.disjoint=this.gl.getParameter(a.GPU_DISJOINT_EXT)),n&&!this.disjoint}{let r=this.getQueryTimerExtensionWebGL1(),a=r.getQueryObjectEXT(t,r.QUERY_RESULT_AVAILABLE_EXT);return null==this.disjoint&&(this.disjoint=this.gl.getParameter(r.GPU_DISJOINT_EXT)),a&&!this.disjoint}}pollFence(t){return new Promise(r=>{this.addItemToPoll(()=>t.isFencePassed(),()=>r())})}pollItems(){let t=linearSearchLastTrue(this.itemsToPoll.map(t=>t.isDoneFn));for(let r=0;r<=t;++r){let{resolveFn:t}=this.itemsToPoll[r];t()}this.itemsToPoll=this.itemsToPoll.slice(t+1)}addItemToPoll(t,r){let a;this.itemsToPoll.push({isDoneFn:t,resolveFn:r}),this.itemsToPoll.length>1||("setTimeoutCustom"in eV.platform&&(a=eV.platform.setTimeoutCustom.bind(eV.platform)),repeatedTry(()=>(this.pollItems(),0===this.itemsToPoll.length),()=>0,null,a))}bindTextureToFrameBuffer(t){this.throwIfDisposed(),bindColorTextureToFramebuffer(this.gl,t,this.framebuffer),this.debug&&validateFramebuffer(this.gl)}unbindTextureToFrameBuffer(){null!=this.outputTexture?(bindColorTextureToFramebuffer(this.gl,this.outputTexture,this.framebuffer),this.debug&&validateFramebuffer(this.gl)):unbindColorTextureFromFramebuffer(this.gl,this.framebuffer)}downloadMatrixDriver(t,r){this.bindTextureToFrameBuffer(t);let a=r();return 
this.unbindTextureToFrameBuffer(),a}setOutputMatrixTextureDriver(t,r,a){this.throwIfDisposed();let n=this.gl;bindColorTextureToFramebuffer(n,t,this.framebuffer),this.debug&&validateFramebuffer(n),this.outputTexture=t,callAndCheck(n,()=>n.viewport(0,0,r,a)),callAndCheck(n,()=>n.scissor(0,0,r,a))}setOutputMatrixWriteRegionDriver(t,r,a,n){this.throwIfDisposed(),callAndCheck(this.gl,()=>this.gl.scissor(t,r,a,n))}throwIfDisposed(){if(this.disposed)throw Error("Attempted to use disposed GPGPUContext.")}throwIfNoProgram(){if(null==this.program)throw Error("No GPU program is currently set.")}};function linearSearchLastTrue(t){let r=0;for(;r`${t}.${r}`)}function getChannels(t,r){return 1===r?[t]:getVecChannels(t,r)}function getSourceCoords(t,r){if(1===t)return"rc";let a="";for(let n=0;n ${this.enableShapeUniforms?"outShape":this.outputShape[0]}`;let r="";for(let a=this.rank-2;a= ${this.enableShapeUniforms?`outShape[${a}]`:this.outputShape[a]}`,a= ${a}; bool rEdge = rp1 >= ${n}; `}getOutput(t){let r=this.getSourceCoordsArr(t);if(1===this.rank){let t=this.enableShapeUniforms?"outShape":this.outputShape[0];return`getA(rc), (rc + 1 >= ${t} ? 0. : getA(rc + 1)), 0, 0`}return`getA(${r[0]}), cEdge ? 0. : getA(${r[1]}), rEdge ? 0. : getA(${r[2]}), rEdge || cEdge ? 0. 
: getA(${r[3]})`}};let ReshapePackedProgram=class ReshapePackedProgram{constructor(t,r){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.customUniforms=[{name:"inputShape",type:"ivec3"}],this.outputShape=t,this.enableShapeUniforms=useShapeUniforms(this.outputShape.length);let a="";for(let t=0;t<4;t++){let r="thisRC = rc;";t%2==1&&(r+="thisRC.z += 1;"),t>1&&(r+="thisRC.y += 1;"),a+=` ${r} ${t>0?"if(thisRC.y < rows && thisRC.z < cols){":""} int flatIndex = getFlatIndex(thisRC); ivec3 inputRC = inputCoordsFromReshapedOutCoords(flatIndex); vec2 inputRCInnerDims = vec2(float(inputRC.y),float(inputRC.z)); result[${t}] = getChannel(getA(inputRC.x, inputRC.y, inputRC.z), inputRCInnerDims); ${t>0?"}":""} `}this.userCode=` ${getReshapedInputCoords(r,this.enableShapeUniforms)} ${this.enableShapeUniforms?getFlatIndexFrom3DOutput():getFlatIndexFrom3D(t)} void main() { ivec3 rc = getOutputCoords(); vec4 result = vec4(0.); ivec3 thisRC; int rows = ${this.enableShapeUniforms?"outShape[1]":t[1]}; int cols = ${this.enableShapeUniforms?"outShape[2]":t[2]}; ${a} setOutput(result); } `}};function getReshapedInputCoords(t,r){let a=r?getLogicalCoordinatesFromFlatIndexByUniform(["r","c","d"],"inputShape"):getLogicalCoordinatesFromFlatIndex(["r","c","d"],t);return` ivec3 inputCoordsFromReshapedOutCoords(int index) { ${a} return ivec3(r, c, d); } `}let TextureManager=class TextureManager{constructor(t){this.gpgpu=t,this.numUsedTextures=0,this.numFreeTextures=0,this._numBytesAllocated=0,this._numBytesFree=0,this.freeTextures={},this.usedTextures={},this.logEnabled=!1}acquireTexture(t,r,a){let n,s=getPhysicalFromLogicalTextureType(r,a),i=getKeyFromTextureShape(t,s,a);i in this.freeTextures||(this.freeTextures[i]=[]),i in this.usedTextures||(this.usedTextures[i]=[]);let o=computeBytes(t,s,this.gpgpu.gl,this.gpgpu.textureConfig,a);if(this.freeTextures[i].length>0){this.numFreeTextures--,this.numUsedTextures++,this._numBytesFree-=o,this.log();let 
t=this.freeTextures[i].pop();return this.usedTextures[i].push(t),t}return s===eu.PACKED_2X2_FLOAT32?n=this.gpgpu.createPackedMatrixTexture(t[0],t[1]):s===eu.PACKED_2X2_FLOAT16?n=this.gpgpu.createFloat16PackedMatrixTexture(t[0],t[1]):s===eu.UNPACKED_FLOAT32?n=this.gpgpu.createFloat32MatrixTexture(t[0],t[1]):s===eu.UNPACKED_FLOAT16?n=this.gpgpu.createFloat16MatrixTexture(t[0],t[1]):s===eu.PACKED_4X1_UNSIGNED_BYTE&&(n=this.gpgpu.createUnsignedBytesMatrixTexture(t[0],t[1])),this.usedTextures[i].push(n),this.numUsedTextures++,this._numBytesAllocated+=o,this.log(),n}releaseTexture(t,r,a,n){if(null==this.freeTextures)return;let s=getPhysicalFromLogicalTextureType(a,n),i=getKeyFromTextureShape(r,s,n);i in this.freeTextures||(this.freeTextures[i]=[]);let o=computeBytes(r,s,this.gpgpu.gl,this.gpgpu.textureConfig,n),l=eV.getNumber("WEBGL_DELETE_TEXTURE_THRESHOLD");-1!==l&&this._numBytesAllocated>l?(this.gpgpu.deleteMatrixTexture(t.texture),this._numBytesAllocated-=o):(this.freeTextures[i].push(t),this.numFreeTextures++,this._numBytesFree+=o),this.numUsedTextures--;let u=this.usedTextures[i],p=u&&u.indexOf(t);if(null==p||p<0)throw Error("Cannot release a texture that was never provided by this texture manager");u[p]=u[u.length-1],u.pop(),this.log()}log(){if(!this.logEnabled)return;let t=this.numFreeTextures+this.numUsedTextures;console.log("Free/Used",`${this.numFreeTextures} / ${this.numUsedTextures}`,`(${t})`);let r=this._numBytesFree/this._numBytesAllocated;console.log(`Bytes allocated: ${this._numBytesAllocated}`),console.log(`Bytes unused: ${this._numBytesFree} (${Math.round(100*r)}%)`)}get numBytesAllocated(){return this._numBytesAllocated}get numBytesFree(){return this._numBytesFree}getNumUsedTextures(){return this.numUsedTextures}getNumFreeTextures(){return this.numFreeTextures}dispose(){if(null!=this.freeTextures){for(let t in this.freeTextures)this.freeTextures[t].forEach(t=>{this.gpgpu.deleteMatrixTexture(t.texture)});for(let t in 
this.usedTextures)this.usedTextures[t].forEach(t=>{this.gpgpu.deleteMatrixTexture(t.texture)});this.freeTextures=null,this.usedTextures=null,this.numUsedTextures=0,this.numFreeTextures=0,this._numBytesAllocated=0,this._numBytesFree=0}}};function numBytesForInternalFormat(t,r){if(r===t.R32F)return 4;if(r===t.R16F)return 2;if(r===t.RGBA32F)return 16;if(r===t.RGBA)return 16;if(r===t.RGBA16F)return 8;else if(r===t.RGBA8)return 4;throw Error(`Unknown internal format ${r}`)}function computeBytes(t,r,a,n,s){let i,o=internalFormatForPhysicalTexType(r,n);if(s){let[r,a]=getPackedMatrixTextureShapeWidthHeight(t[0],t[1]);i=r*a}else{var l;let[r,a]=(l=t[0],[t[1],l]);i=r*a}return i*numBytesForInternalFormat(a,o)}function internalFormatForPhysicalTexType(t,r){switch(t){case eu.PACKED_2X2_FLOAT32:return getInternalFormatForPackedMatrixTexture(r);case eu.PACKED_2X2_FLOAT16:return getInternalFormatForFloat16PackedMatrixTexture(r);case eu.UNPACKED_FLOAT32:return getInternalFormatForFloat32MatrixTexture(r);case eu.UNPACKED_FLOAT16:return getInternalFormatForFloat16MatrixTexture(r);case eu.PACKED_4X1_UNSIGNED_BYTE:return getInternalFormatForUnsignedBytesMatrixTexture(r);default:throw Error(`Unknown physical texture type ${t}`)}}function getPhysicalTextureForRendering(t){return eV.getBool("WEBGL_RENDER_FLOAT32_ENABLED")?t?eu.PACKED_2X2_FLOAT32:eu.UNPACKED_FLOAT32:t?eu.PACKED_2X2_FLOAT16:eu.UNPACKED_FLOAT16}function getPhysicalFromLogicalTextureType(t,r){if(t===el.UPLOAD)return eu.PACKED_2X2_FLOAT32;if(t===el.RENDER||null==t)return getPhysicalTextureForRendering(r);if(t===el.DOWNLOAD||t===el.PIXELS)return eu.PACKED_4X1_UNSIGNED_BYTE;throw Error(`Unknown logical texture type ${t}`)}function getKeyFromTextureShape(t,r,a){return`${t[0]}_${t[1]}_${r}_${a}`}let UnaryOpProgram=class UnaryOpProgram{constructor(t,r){this.variableNames=["A"],this.outputShape=t,this.enableShapeUniforms=useShapeUniforms(this.outputShape.length),this.userCode=` float unaryOperation(float x) { ${r} } void main() { 
float x = getAAtOutCoords(); float y = unaryOperation(x); setOutput(y); } `}};
/* GLSL snippet constants for unary kernels:
   p2 = NaN passthrough guard, p3 = abs, p4 = relu, p6 = relu6, p5 = identity
   (p5 is also used elsewhere to materialize sliced tensors), p8 = packed elu,
   p7 = packed relu (NaN-preserving), p9 = packed relu6. */
let p2="if (isnan(x)) return x;",p3="return abs(x);",p4=p2+` return (x < 0.0) ? 0.0 : x; `,p6=p2+` return (x < 0.0) ? 0.0 : min(6.0, x); `,p5="return x;",p8=` vec4 result; result.r = (x.r >= 0.0) ? x.r : (exp(x.r) - 1.0); result.g = (x.g >= 0.0) ? x.g : (exp(x.g) - 1.0); result.b = (x.b >= 0.0) ? x.b : (exp(x.b) - 1.0); result.a = (x.a >= 0.0) ? x.a : (exp(x.a) - 1.0); return result; `,p7=` vec4 result = x * vec4(greaterThanEqual(x, vec4(0.0))); bvec4 isNaN = isnan(x); result.r = isNaN.r ? x.r : result.r; result.g = isNaN.g ? x.g : result.g; result.b = isNaN.b ? x.b : result.b; result.a = isNaN.a ? x.a : result.a; return result; `,p9=` vec4 result = min(x, vec4(6.)) * vec4(greaterThanEqual(x, vec4(0.0))); bvec4 isNaN = isnan(x); result.r = isNaN.r ? x.r : result.r; result.g = isNaN.g ? x.g : result.g; result.b = isNaN.b ? x.b : result.b; result.a = isNaN.a ? x.a : result.a; return result; `;
/* Packed (vec4) variant of UnaryOpProgram: the snippet r operates on four texels at once. */
let UnaryOpPackedProgram=class UnaryOpPackedProgram{constructor(t,r){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=t,this.enableShapeUniforms=useShapeUniforms(this.outputShape.length),this.userCode=` vec4 unaryOperation(vec4 x) { ${r} } void main() { vec4 x = getAAtOutCoords(); vec4 y = unaryOperation(x); setOutput(y); } `}};
/* Converts a packed (2x2-per-texel) texture to unpacked output by selecting the channel that
   corresponds to each output coordinate (last two coords pick the channel within the texel). */
let UnpackProgram=class UnpackProgram{constructor(t){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!1,this.outputShape=t,this.enableShapeUniforms=useShapeUniforms(this.outputShape.length);const r=t.length,a=getChannels("rc",r),n=getCoordsDataType(r),s=getSourceCoords(r,a),i=a.slice(-2),o=r<=1?"rc":`vec2(${i.join(",")})`;this.userCode=` void main() { ${n} rc = getOutputCoords(); vec4 packedInput = getA(${s}); setOutput(getChannel(packedInput, ${o})); } `}};
/* he: CPU where() implementation reused by this backend; ht: shader-binary caches keyed by WebGL version. */
let he=whereImpl,ht={};
/* Lazily creates and returns the shader-binary cache for WebGL version t. */
function getBinaryCache(t){return t in ht||(ht[t]={}),ht[t]}
/* Tensor-size threshold below which ops are handed off to the CPU backend. */
let hr=eV.getNumber("CPU_HANDOFF_SIZE_THRESHOLD");
/* Estimates the GPU-memory warning threshold in MB from the screen dimensions
   (body continues on the next wrapped line; falls back to 1024 when no screen is available). */
function numMBBeforeWarning(){return 
null==eV.global.screen?1024:eV.global.screen.height*eV.global.screen.width*window.devicePixelRatio*600/1024/1024}let MathBackendWebGL=class MathBackendWebGL extends KernelBackend{nextDataId(){return MathBackendWebGL.nextDataId++}constructor(t){let r;if(super(),this.pendingRead=new WeakMap,this.pendingDisposal=new WeakSet,this.dataRefCount=new WeakMap,this.numBytesInGPU=0,this.uploadWaitMs=0,this.downloadWaitMs=0,this.lastGlFlushTime=0,this.warnedAboutMemory=!1,this.pendingDeletes=0,this.disposed=!1,!eV.getBool("HAS_WEBGL"))throw Error("WebGL is not supported on this device");null!=t?(r=t instanceof GPGPUContext?t:new GPGPUContext(getWebGLContext(eV.getNumber("WEBGL_VERSION"),t)),this.binaryCache={},this.gpgpuCreatedLocally=!1):(r=new GPGPUContext(getWebGLContext(eV.getNumber("WEBGL_VERSION"))),this.binaryCache=getBinaryCache(eV.getNumber("WEBGL_VERSION")),this.gpgpuCreatedLocally=!0),this.gpgpu=r,this.canvas=this.gpgpu.gl.canvas,this.textureManager=new TextureManager(this.gpgpu),this.numMBBeforeWarning=numMBBeforeWarning(),this.texData=new DataStorage(this,ay)}numDataIds(){return this.texData.numDataIds()-this.pendingDeletes}writeTexture(t,r,a,n,s,i){let o=this.makeTensorInfo(r,a),l=this.texData.get(o.dataId);l.isPacked=!1,l.texture={texture:t,texShape:[n,s]},l.texShape=[n,s];let u=new EncodeMatrixProgram(getShapeAs3D(r),!1,i),p=this.runWebGLProgram(u,[o],a,[[n,s]]);return p.shape=r,l.texture=null,this.disposeIntermediateTensorInfo(o),p.dataId}write(t,r,a){if((eV.getBool("WEBGL_CHECK_NUMERICAL_PROBLEMS")||eV.getBool("DEBUG"))&&this.checkNumericalProblems(t),"complex64"===a&&null!=t)throw Error("Cannot write to a complex64 dtype. 
Please use tf.complex(real, imag).");let n={id:this.nextDataId()};return this.texData.set(n,{shape:r,dtype:a,values:t,usage:el.UPLOAD,refCount:1}),n}refCount(t){return this.texData.has(t)?this.texData.get(t).refCount:0}incRef(t){let r=this.texData.get(t);r.refCount++}decRef(t){if(this.texData.has(t)){let r=this.texData.get(t);r.refCount--}}move(t,r,a,n,s){if(eV.getBool("DEBUG")&&this.checkNumericalProblems(r),"complex64"===n)throw Error("Cannot write to a complex64 dtype. Please use tf.complex(real, imag).");this.texData.set(t,{shape:a,dtype:n,values:r,usage:el.UPLOAD,refCount:s})}disposeIntermediateTensorInfo(t){this.disposeData(t.dataId)}readSync(t){let r,a,{values:n,dtype:s,complexTensorInfos:i,slice:o,shape:l,isPacked:u}=this.texData.get(t);if(null!=o){let r;r=u?new UnaryOpPackedProgram(l,p5):new UnaryOpProgram(l,p5);let a=this.runWebGLProgram(r,[{dataId:t,shape:l,dtype:s}],s),n=this.readSync(a.dataId);return this.disposeIntermediateTensorInfo(a),n}if(null!=n)return this.convertAndCacheOnCPU(t);if("string"===s)return n;let p=null!=this.activeTimers;return p&&(r=util_now()),a="complex64"===s?mergeRealAndImagArrays(this.readSync(i.real.dataId),this.readSync(i.imag.dataId)):this.getValuesFromTexture(t),p&&(this.downloadWaitMs+=util_now()-r),this.convertAndCacheOnCPU(t,a)}async read(t){let r,a;if(this.pendingRead.has(t)){let r=this.pendingRead.get(t);return new Promise(t=>r.push(t))}let{values:n,shape:s,slice:i,dtype:o,complexTensorInfos:l,isPacked:u}=this.texData.get(t);if(null!=i){let r;r=u?new UnaryOpPackedProgram(s,p5):new UnaryOpProgram(s,p5);let a=this.runWebGLProgram(r,[{dataId:t,shape:s,dtype:o}],o),n=this.read(a.dataId);return this.disposeIntermediateTensorInfo(a),n}if(null!=n)return this.convertAndCacheOnCPU(t);if(eV.getBool("DEBUG")&&!eV.getBool("WEBGL_DOWNLOAD_FLOAT_ENABLED")&&2===eV.getNumber("WEBGL_VERSION"))throw Error("tensor.data() with WEBGL_DOWNLOAD_FLOAT_ENABLED=false and WEBGL_VERSION=2 not yet supported.");let 
p=null;if("complex64"!==o&&eV.get("WEBGL_BUFFER_SUPPORTED")){r=this.decode(t);let a=this.texData.get(r.dataId);p=this.gpgpu.createBufferFromTexture(a.texture.texture,...getDenseTexShape(s))}if(this.pendingRead.set(t,[]),"complex64"!==o&&await this.gpgpu.createAndWaitForFence(),"complex64"===o){let t=await Promise.all([this.read(l.real.dataId),this.read(l.imag.dataId)]);a=mergeRealAndImagArrays(t[0],t[1])}else if(null==p)a=this.getValuesFromTexture(t);else{let t=sizeFromShape(s);a=this.gpgpu.downloadFloat32MatrixFromBuffer(p,t)}if(null!=r&&this.disposeIntermediateTensorInfo(r),null!=p){let t=this.gpgpu.gl;callAndCheck(t,()=>t.deleteBuffer(p))}let m=this.convertAndCacheOnCPU(t,a),y=this.pendingRead.get(t);return this.pendingRead.delete(t),y.forEach(t=>t(m)),this.pendingDisposal.has(t)&&(this.pendingDisposal.delete(t),this.disposeData(t)&&ay.removeDataId(t,this),this.pendingDeletes--),m}readToGPU(t,r={}){let{values:a,shape:n,slice:s,dtype:i,isPacked:o,texture:l}=this.texData.get(t);if("complex64"===i)throw Error("Does not support reading texture for complex64 dtype.");if(null!=s){let a;a=o?new UnaryOpPackedProgram(n,p5):new UnaryOpProgram(n,p5);let s=this.runWebGLProgram(a,[{dataId:t,shape:n,dtype:i}],i),l=this.readToGPU(s,r);return this.disposeIntermediateTensorInfo(s),l}if(null==l)if(null!=a)throw Error("Data is not on GPU but on CPU.");else throw Error("There is no data on GPU or CPU.");let u=this.decode(t,r.customTexShape);return Object.assign({tensorRef:ay.makeTensorFromTensorInfo(u)},this.texData.get(u.dataId).texture)}bufferSync(t){let r=this.readSync(t.dataId);if("string"===t.dtype)try{let a=r.map(t=>decodeString(t));return buffer(t.shape,t.dtype,a)}catch{throw Error("Failed to decode encoded string bytes into utf-8")}return buffer(t.shape,t.dtype,r)}checkNumericalProblems(t){if(null!=t)for(let r=0;r0}time(t){let r=this.activeTimers,a=[],n=!1;null==this.programTimersStack?(this.programTimersStack=a,n=!0):this.activeTimers.push(a),this.activeTimers=a,t();let 
s=flatten(this.activeTimers.map(t=>t.query)).filter(t=>null!=t),i=flatten(this.activeTimers.map(t=>t.name)).filter(t=>null!=t);this.activeTimers=r,n&&(this.programTimersStack=null);let o={uploadWaitMs:this.uploadWaitMs,downloadWaitMs:this.downloadWaitMs,kernelMs:null,wallMs:null};return(async()=>{if(eV.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0){let t=await Promise.all(s);o.kernelMs=sum(t),o.getExtraProfileInfo=()=>t.map((t,r)=>({name:i[r],ms:t})).map(t=>`${t.name}: ${t.ms}`).join(", ")}else o.kernelMs={error:"WebGL query timers are not supported in this environment."};return this.uploadWaitMs=0,this.downloadWaitMs=0,o})()}memory(){return{unreliable:!1,numBytesInGPU:this.numBytesInGPU,numBytesInGPUAllocated:this.textureManager.numBytesAllocated,numBytesInGPUFree:this.textureManager.numBytesFree}}startTimer(){return eV.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?this.gpgpu.beginQuery():{startMs:util_now(),endMs:null}}endTimer(t){return eV.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?this.gpgpu.endQuery():t.endMs=util_now(),t}async getQueryTime(t){return eV.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?this.gpgpu.waitForQueryAndGetTime(t):t.endMs-t.startMs}disposeData(t,r=!1){if(this.pendingDisposal.has(t))return!1;if(!this.texData.has(t))return!0;if(r?this.texData.get(t).refCount=0:this.texData.get(t).refCount--,!r&&this.texData.get(t).refCount>0)return!1;if(this.pendingRead.has(t))return this.pendingDisposal.add(t),this.pendingDeletes++,!1;this.releaseGPUData(t);let{complexTensorInfos:a}=this.texData.get(t);return 
null!=a&&(this.disposeData(a.real.dataId,r),this.disposeData(a.imag.dataId,r)),this.texData.delete(t),!0}releaseGPUData(t){let{texture:r,dtype:a,texShape:n,usage:s,isPacked:i,slice:o}=this.texData.get(t),l=o&&o.origDataId||t,u=this.dataRefCount.get(l);u>1?this.dataRefCount.set(l,u-1):(this.dataRefCount.delete(l),null!=r&&(this.numBytesInGPU-=this.computeBytes(n,a),this.textureManager.releaseTexture(r,n,s,i)));let p=this.texData.get(t);p.texture=null,p.texShape=null,p.isPacked=!1,p.slice=null}getTexture(t){return this.uploadToGPU(t),this.texData.get(t).texture.texture}getDataInfo(t){return this.texData.get(t)}shouldExecuteOnCPU(t,r=hr){return eV.getBool("WEBGL_CPU_FORWARD")&&t.every(t=>null==this.texData.get(t.dataId).texture&&sizeFromShape(t.shape)0&&isString(a[0])){let s=a.map(t=>encodeString(t));n=this.write(s,t,r)}else n=this.write(a,t,r);return this.texData.get(n).usage=null,{dataId:n,shape:t,dtype:r}}makeOutput(t,r,a){return ay.makeTensorFromTensorInfo(this.makeTensorInfo(t,r,a),this)}unpackTensor(t){let r=new UnpackProgram(t.shape);return this.runWebGLProgram(r,[t],t.dtype)}packTensor(t){let r=new PackProgram(t.shape);return this.runWebGLProgram(r,[t],t.dtype,null,!0)}packedReshape(t,r){let a=[getBatchDim(t.shape),...getRowsCols(t.shape)],n={dtype:t.dtype,shape:a,dataId:t.dataId},s=new ReshapePackedProgram([getBatchDim(r),...getRowsCols(r)],a),i=this.runWebGLProgram(s,[n],t.dtype,[a],!0);return{dataId:i.dataId,shape:r,dtype:i.dtype}}decode(t,r){let a,{isPacked:n,shape:s,dtype:i}=this.texData.get(t);null!=r&&assert(sizeFromShape(s)<=r[0]*r[1]*4,()=>"customTexShape is too small. 
Row * Column * 4 should be equal or larger than the size of the tensor data.");let o=getShapeAs3D(s);a=n?new DecodeMatrixPackedProgram(o):new DecodeMatrixProgram(o);let l=[null!=r?r:getDenseTexShape(o)],u=this.runWebGLProgram(a,[{shape:o,dtype:i,dataId:t}],i,l,!0,r);return{dtype:i,shape:s,dataId:u.dataId}}runWebGLProgram(t,r,a,n,s=!1,i){let o,l=this.makeTensorInfo(t.outputShape,a),u=this.texData.get(l.dataId);if(t.packedOutput&&(u.isPacked=!0),t.outPackingScheme===eo.DENSE&&(u.texShape=(null!=i?i:getDenseTexShape(t.outputShape)).map(t=>2*t)),null!=t.outTexUsage&&(u.usage=t.outTexUsage),0===sizeFromShape(l.shape))return u.values=getArrayFromDType(l.dtype,0),l;let p=[],m=r.map(r=>{if("complex64"===r.dtype)throw Error("GPGPUProgram does not support complex64 input. For complex64 dtypes, please separate the program into real and imaginary parts.");let a=this.texData.get(r.dataId);if(null==a.texture){if(!t.packedInputs&&sizeFromShape(r.shape)<=eV.getNumber("WEBGL_SIZE_UPLOAD_UNIFORM"))return{shape:r.shape,texData:null,isUniform:!0,uniformValues:a.values};t.packedInputs&&(a.isPacked=!0,a.shape=r.shape)}if(this.uploadToGPU(r.dataId),!!a.isPacked!=!!t.packedInputs)r=a.isPacked?this.unpackTensor(r):this.packTensor(r),p.push(r),a=this.texData.get(r.dataId);else if(a.isPacked&&!isReshapeFree(a.shape,r.shape)){let t=r,n=r.shape;r.shape=a.shape,r=this.packedReshape(r,n),p.push(r),a=this.texData.get(r.dataId),t.shape=n}return{shape:r.shape,texData:a,isUniform:!1}});this.uploadToGPU(l.dataId);let y={shape:l.shape,texData:u,isUniform:!1},_=makeShaderKey(t,m,y),w=this.getAndSaveBinary(_,()=>compileProgram(this.gpgpu,t,m,y)),I=null!=this.activeTimers;I&&(o=this.startTimer()),eV.get("ENGINE_COMPILE_ONLY")||runProgram(this.gpgpu,w,m,y,n),p.forEach(t=>this.disposeIntermediateTensorInfo(t)),I&&(o=this.endTimer(o),this.activeTimers.push({name:t.constructor.name,query:this.getQueryTime(o)}));let C=eV.getNumber("WEBGL_FLUSH_THRESHOLD");if(C>0){let 
t=util_now();t-this.lastGlFlushTime>C&&(this.gpgpu.gl.flush(),this.lastGlFlushTime=t)}if(!eV.getBool("WEBGL_LAZILY_UNPACK")&&u.isPacked&&!1===s){let t=this.unpackTensor(l);return this.disposeIntermediateTensorInfo(l),t}return l}compileAndRun(t,r,a,n,s=!1){return a=a||r[0].dtype,this.runWebGLProgram(t,r,a,n,s)}getAndSaveBinary(t,r){return t in this.binaryCache||(this.binaryCache[t]=r()),this.binaryCache[t]}getTextureManager(){return this.textureManager}dispose(){this.disposed||(eV.getBool("IS_TEST")||Object.keys(this.binaryCache).forEach(t=>{this.gpgpu.deleteProgram(this.binaryCache[t].webGLProgram),delete this.binaryCache[t]}),this.textureManager.dispose(),null!=this.canvas&&"u">typeof HTMLCanvasElement&&this.canvas instanceof HTMLCanvasElement?this.canvas.remove():this.canvas=null,this.gpgpuCreatedLocally&&(this.gpgpu.program=null,this.gpgpu.dispose()),this.disposed=!0)}floatPrecision(){return null==this.floatPrecisionValue&&(this.floatPrecisionValue=globals_tidy(()=>{if(!eV.get("WEBGL_RENDER_FLOAT32_ENABLED")){let t=eV.getBool("DEBUG");eV.set("DEBUG",!1);let r=this.abs(scalar_scalar(1e-8)).dataSync()[0];if(eV.set("DEBUG",t),r>0)return 32}return 16})),this.floatPrecisionValue}epsilon(){return 32===this.floatPrecision()?1e-7:1e-4}uploadToGPU(t){let r,a=this.texData.get(t),{shape:n,dtype:s,values:i,texture:o,usage:l,isPacked:u}=a;if(null!=o)return;let p=null!=this.activeTimers;p&&(r=util_now());let m=a.texShape;if(null==m&&(a.texShape=m=getTextureShapeFromLogicalShape(n,u)),null!=i){let t,o=getShapeAs3D(n),l=m[1],y=m[0],_=i instanceof Uint8Array||i instanceof Uint8ClampedArray;(u||!_)&&([l,y]=getPackedMatrixTextureShapeWidthHeight(m[0],m[1])),t=u?new EncodeMatrixPackedProgram(o,_):new EncodeMatrixProgram(o,_);let w=_?[y,l]:m,I=this.makeTensorInfo(w,s),C=this.texData.get(I.dataId);_?C.usage=el.PIXELS:C.usage=el.UPLOAD,C.texShape=w,this.gpgpu.uploadDenseMatrixToTexture(this.getTexture(I.dataId),l,y,i);let 
E=[[y,l]],A=this.runWebGLProgram(t,[I],s,E,!0),$=this.texData.get(A.dataId);a.texShape=$.texShape,a.isPacked=$.isPacked,a.usage=$.usage,eV.get("ENGINE_COMPILE_ONLY")?this.disposeData(A.dataId):(a.texture=$.texture,a.values=null,this.texData.delete(A.dataId)),this.disposeIntermediateTensorInfo(I),p&&(this.uploadWaitMs+=util_now()-r)}else a.texture=this.acquireTexture(m,l,s,u)}convertAndCacheOnCPU(t,r){let a=this.texData.get(t),{dtype:n}=a;return null!=r&&(a.values=float32ToTypedArray(r,n)),a.values}acquireTexture(t,r,a,n){if(this.numBytesInGPU+=this.computeBytes(t,a),!this.warnedAboutMemory&&this.numBytesInGPU>1024*this.numMBBeforeWarning*1024){let t=(this.numBytesInGPU/1024/1024).toFixed(2);this.warnedAboutMemory=!0,console.warn(`High memory usage in GPU: ${t} MB, most likely due to a memory leak`)}return this.textureManager.acquireTexture(t,r,n)}computeBytes(t,r){return t[0]*t[1]*bytesPerElement(r)}checkCompileCompletion(){for(let[,t]of Object.entries(this.binaryCache))this.checkCompletion_(t)}async checkCompileCompletionAsync(){let t=[];if(this.gpgpu.parallelCompilationExtension){for(let[,r]of Object.entries(this.binaryCache))t.push(this.checkCompletionAsync_(r));return Promise.all(t)}for(let[,r]of Object.entries(this.binaryCache)){let a=new Promise(t=>{try{this.checkCompletion_(r),t(!0)}catch(t){throw t}});t.push(a)}return Promise.all(t)}async checkCompletionAsync_(t){return this.gpgpu.gl.getProgramParameter(t.webGLProgram,this.gpgpu.parallelCompilationExtension.COMPLETION_STATUS_KHR)?this.checkCompletion_(t):(await nextFrame(),this.checkCompletionAsync_(t))}checkCompletion_(t){if(!1===this.gpgpu.gl.getProgramParameter(t.webGLProgram,this.gpgpu.gl.LINK_STATUS)){if(console.log(this.gpgpu.gl.getProgramInfoLog(t.webGLProgram)),!1===this.gpgpu.gl.getShaderParameter(t.fragmentShader,this.gpgpu.gl.COMPILE_STATUS))throw logShaderSourceAndInfoLog(t.source,this.gpgpu.gl.getShaderInfoLog(t.fragmentShader)),Error("Failed to compile fragment shader.");throw Error("Failed to 
link vertex and fragment shaders.")}return!0}getUniformLocations(){for(let t of Object.values(this.binaryCache)){this.gpgpu.buildVao(t.webGLProgram);let{variablesLocations:r,customUniformLocations:a,infLoc:n,nanLoc:s,outShapeLocation:i,outShapeStridesLocation:o,outTexShapeLocation:l}=getUniformLocations(this.gpgpu,t.program,t.webGLProgram);t.variablesLocations=r,t.customUniformLocations=a,t.infLoc=n,t.nanLoc=s,t.outShapeLocation=i,t.outShapeStridesLocation=o,t.outTexShapeLocation=l}}createTensorFromGPUData(t,r,a){t.channels=t.channels||"RGBA";let{texture:n,height:s,width:i,channels:o}=t,l=ay.backend;if(!l.gpgpu.gl.isTexture(n))throw Error("The texture is invalid. Also, please make sure the texture and the TFJS WebGL backend are using the same canvas. If you want to use your own custom canvas, you have to create and use the custom TFJS WebGL backend created from the canvas through 'new tf.MathBackendWebGL(customCanvas)'.");let u=l.writeTexture(n,r,a,s,i,o);return ay.makeTensorFromDataId(u,r,a,l)}};function float32ToTypedArray(t,r){if("float32"===r||"complex64"===r)return t;if("int32"===r||"bool"===r){let a="int32"===r?new Int32Array(t.length):new Uint8Array(t.length);for(let r=0;rnew MathBackendWebGL,2);let ha=` if (isnan(a)) return a; if (isnan(b)) return b; `;let BinaryOpProgram=class BinaryOpProgram{constructor(t,r,a){this.variableNames=["A","B"],this.outputShape=assertAndGetBroadcastShape(r,a),this.enableShapeUniforms=useShapeUniforms(this.outputShape.length),this.userCode=` float binaryOperation(float a, float b) { ${t} } void main() { float a = getAAtOutCoords(); float b = getBAtOutCoords(); setOutput(binaryOperation(a, b)); } `}};let hn=` result.r = isNaN.r ? NAN : result.r; result.g = isNaN.g ? NAN : result.g; result.b = isNaN.b ? NAN : result.b; result.a = isNaN.a ? 
NAN : result.a; `;let BinaryOpPackedProgram=class BinaryOpPackedProgram{constructor(t,r,a,n=!1){this.variableNames=["A","B"],this.supportsBroadcasting=!0,this.packedInputs=!0,this.packedOutput=!0,this.outputShape=assertAndGetBroadcastShape(r,a);const s=this.outputShape.length;this.enableShapeUniforms=useShapeUniforms(s);let i="";if(n)if(0===s||1===sizeFromShape(this.outputShape))i=` result.y = 0.; result.z = 0.; result.w = 0.; `;else{const t=getCoordsDataType(s);if(i=` ${t} coords = getOutputCoords(); `,1===s)this.enableShapeUniforms?i+=` result.y = (coords + 1) >= outShape ? 0. : result.y; result.z = 0.; result.w = 0.; `:i+=` result.y = (coords + 1) >= ${this.outputShape[0]} ? 0. : result.y; result.z = 0.; result.w = 0.; `;else{const t=getChannels("coords",s);this.enableShapeUniforms?i+=` bool nextRowOutOfBounds = (${t[s-2]} + 1) >= outShape[${s} - 2]; bool nextColOutOfBounds = (${t[s-1]} + 1) >= outShape[${s} - 1]; result.y = nextColOutOfBounds ? 0. : result.y; result.z = nextRowOutOfBounds ? 0. : result.z; result.w = nextColOutOfBounds || nextRowOutOfBounds ? 0. : result.w; `:i+=` bool nextRowOutOfBounds = (${t[s-2]} + 1) >= ${this.outputShape[s-2]}; bool nextColOutOfBounds = (${t[s-1]} + 1) >= ${this.outputShape[s-1]}; result.y = nextColOutOfBounds ? 0. : result.y; result.z = nextRowOutOfBounds ? 0. : result.z; result.w = nextColOutOfBounds || nextRowOutOfBounds ? 0. 
: result.w; `}}this.userCode=` vec4 binaryOperation(vec4 a, vec4 b) { ${t} } void main() { vec4 a = getAAtOutCoords(); vec4 b = getBAtOutCoords(); vec4 result = binaryOperation(a, b); ${i} setOutput(result); } `}};
/* Identity kernel: returns a TensorInfo sharing the input's dataId, bumping its refcount
   so disposal of either handle is safe. */
function kernels_Identity_identity(t){let{inputs:r,backend:a}=t,{x:n}=r;return a.incRef(n.dataId),{dataId:n.dataId,shape:n.shape,dtype:n.dtype}}
/* Complex kernel: builds a complex64 tensor whose complexTensorInfos wrap the real/imag inputs
   via identity (so the underlying buffers are shared, not copied). */
function kernels_Complex_complex(t){let{inputs:r,backend:a}=t,{real:n,imag:s}=r,i=a.makeTensorInfo(n.shape,"complex64");return a.texData.get(i.dataId).complexTensorInfos={real:kernels_Identity_identity({inputs:{x:n},backend:a}),imag:kernels_Identity_identity({inputs:{x:s},backend:a})},i}
/* Leaky-relu GLSL: hs is the scalar form, hi the packed vec4 form (branch-free via a
   lessThan mask). */
let hs="return (a < 0.) ? b * a : a;",hi=` vec4 aLessThanZero = vec4(lessThan(a, vec4(0.))); return (aLessThanZero * (b * a)) + ((vec4(1.0) - aLessThanZero) * a); `;
/* LeakyRelu kernel: materializes alpha as a float32 scalar tensor, runs the binary program
   (packed when WEBGL_PACK_BINARY_OPERATIONS), then disposes the intermediate scalar. */
function kernels_LeakyRelu_leakyRelu(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{alpha:i}=n,o=a.makeTensorInfo([],"float32",createScalarValue(i,"float32")),l=eV.getBool("WEBGL_PACK_BINARY_OPERATIONS")?new BinaryOpPackedProgram(hi,s.shape,o.shape):new BinaryOpProgram(hs,s.shape,o.shape),u=a.runWebGLProgram(l,[s,o],"float32");return a.disposeIntermediateTensorInfo(o),u}
/* Prelu GLSL, scalar form (string literal continues on the next wrapped line). */
let ho="return (a < 0.) ? 
b * a : a;",hl=` vec4 aLessThanZero = vec4(lessThan(a, vec4(0.))); return (aLessThanZero * (b * a)) + ((vec4(1.0) - aLessThanZero) * a); `;function kernels_Prelu_prelu(t){let{inputs:r,backend:a}=t,{x:n,alpha:s}=r,i=eV.getBool("WEBGL_PACK_BINARY_OPERATIONS")?new BinaryOpPackedProgram(hl,n.shape,s.shape):new BinaryOpProgram(ho,n.shape,s.shape);return a.runWebGLProgram(i,[n,s],"float32")}let hu="if (isnan(x)) return x;";function kernel_funcs_utils_unaryKernelFunc({opSnippet:t,packedOpSnippet:r,cpuKernelImpl:a,dtype:n}){return({inputs:s,backend:i})=>{let o,{x:l}=s,u=n||l.dtype;if(i.shouldExecuteOnCPU([l])&&null!=a){let t=a(i.texData.get(l.dataId).values,u);return i.makeTensorInfo(l.shape,u,t)}return o=eV.getBool("WEBGL_PACK_UNARY_OPERATIONS")&&null!=r?new UnaryOpPackedProgram(l.shape,r):new UnaryOpProgram(l.shape,t),i.runWebGLProgram(o,[l],u)}}function kernel_funcs_utils_binaryKernelFunc({opSnippet:t,packedOpSnippet:r,checkOutOfBounds:a=!1,supportsComplex:n=!1,cpuKernelImpl:s,dtype:i}){return({inputs:o,backend:l})=>{let u,{a:p,b:m}=o;if(n&&"complex64"===p.dtype){let r=l.texData.get(p.dataId),a=l.texData.get(m.dataId),[n,s]=[[r.complexTensorInfos.real,a.complexTensorInfos.real],[r.complexTensorInfos.imag,a.complexTensorInfos.imag]].map(r=>{let[a,n]=r,s={dataId:a.dataId,dtype:a.dtype,shape:p.shape},i={dataId:n.dataId,dtype:n.dtype,shape:m.shape},o=new BinaryOpProgram(t,p.shape,m.shape);return l.runWebGLProgram(o,[s,i],upcastType(a.dtype,n.dtype))}),i=kernels_Complex_complex({inputs:{real:n,imag:s},backend:l});return l.disposeIntermediateTensorInfo(n),l.disposeIntermediateTensorInfo(s),i}let y=i||upcastType(p.dtype,m.dtype);if(("string"===p.dtype||"string"===m.dtype||l.shouldExecuteOnCPU([p,m]))&&null!=s){let t=l.texData.get(p.dataId).values,r=l.texData.get(m.dataId).values,a="string"===p.dtype?fromUint8ToStringArray(t):t,n="string"===p.dtype?fromUint8ToStringArray(r):r,[i,o]=s(p.shape,m.shape,a,n,y),u=l.makeTensorInfo(o,y);return l.texData.get(u.dataId).values=i,u}return 
u=eV.getBool("WEBGL_PACK_BINARY_OPERATIONS")&&null!=r?new BinaryOpPackedProgram(r,p.shape,m.shape,a):new BinaryOpProgram(t,p.shape,m.shape),l.runWebGLProgram(u,[p,m],y)}}function mapActivationToShaderProgram(t,r=!1){if("linear"===t)return"return x;";if("relu"===t)return r?p7:p4;if("elu"===t)return r?p8:"return (x >= 0.0) ? x : (exp(x) - 1.0);";if("relu6"===t)return r?p9:p6;if("prelu"===t)return r?hl:ho;else if("leakyrelu"===t)return r?hi:hs;else if("sigmoid"===t)return"return 1.0 / (1.0 + exp(-1.0 * x));";throw Error(`Activation ${t} has not been implemented for the WebGL backend.`)}let MatMulPackedProgram=class MatMulPackedProgram{constructor(t,r,a,n=!1,s=!1,i=!1,o=null,l=!1,u=!1){this.variableNames=["matrixA","matrixB"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=a,this.enableShapeUniforms=useShapeUniforms(this.outputShape.length);const p=Math.ceil((n?t[1]:t[2])/2),m=n?["a.xxyy","a.zzww"]:["a.xxzz","a.yyww"],y=s?["b.xzxz","b.ywyw"]:["b.xyxy","b.zwzw"];let _="",w="";o&&(_=l?`vec4 activation(vec4 a) { vec4 b = getPreluActivationWeightsAtOutCoords(); ${o} }`:u?`vec4 activation(vec4 a) { vec4 b = getLeakyreluAlphaAtOutCoords(); ${o} }`:`vec4 activation(vec4 x) { ${o} }`,w="result = activation(result);"),i&&this.variableNames.push("bias"),l&&this.variableNames.push("preluActivationWeights"),u&&this.variableNames.push("leakyreluAlpha");let I="rc.x",C="rc.x";t[0]`The new shape (${l}) has ${u} elements and the old shape (${s.shape}) has ${o} elements. 
The new shape and old shape must have the same number of elements.`);let p=a.texData.get(s.dataId);return!p.isPacked||isReshapeFree(s.shape,l)||null!==p.texture&&isReshapeFree(p.shape,l)?(a.incRef(s.dataId),{dataId:s.dataId,shape:l,dtype:s.dtype}):packedReshape(s,l,a)}let MeanProgram=class MeanProgram{constructor(t,r){this.variableNames=["x"];const{windowSize:a,batchSize:n,inSize:s,outSize:i}=t;this.outputShape=[n,i];const o=4*Math.floor(a/4),l=a%4;let u="sumValue += dot(values, ones);";if(null!=r){const t=1/r;u=`sumValue += dot(values * ${t%1==0?t.toPrecision(2):t}, ones);`}let p="";s%a>0&&(p=` if (inIdx < 0 || inIdx >= ${s}) { return 0.0; } `),this.userCode=` const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0); float getValue(int batch, int inIdx) { ${p} return getX(batch, inIdx); } void main() { ivec2 coords = getOutputCoords(); int batch = coords[0]; int outIdx = coords[1]; int inOffset = outIdx * ${a}; float sumValue = 0.0; for (int i = 0; i < ${o}; i += 4) { int inIdx = inOffset + i; vec4 values = vec4( getValue(batch, inIdx), getValue(batch, inIdx + 1), getValue(batch, inIdx + 2), getValue(batch, inIdx + 3) ); ${u} } int inIdx = inOffset + ${o}; if (${1===l}) { vec4 values = vec4(getValue(batch, inIdx), 0.0, 0.0, 0.0); ${u} } else if (${2===l}) { vec4 values = vec4( getValue(batch, inIdx), getValue(batch, inIdx + 1), 0.0, 0.0); ${u} } else if (${3===l}) { vec4 values = vec4( getValue(batch, inIdx), getValue(batch, inIdx + 1), getValue(batch, inIdx + 2), 0.0); ${u} } setOutput(sumValue); } `}};let ReduceProgram=class ReduceProgram{constructor(t,r){this.variableNames=["x"];const{windowSize:a,batchSize:n,inSize:s,outSize:i}=t;this.outputShape=[n,i];let o="0.0",l="";"prod"===r?o="1.0":"min"===r?(o="1.0 / 1e-20",l="min"):"max"===r&&(o="-1.0 / 1e-20",l="max");let u=`${r}(${r}(${r}(minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])`;"sum"===r?u="sumValue":"prod"===r?u="prodValue":"all"===r?u="allValue":"any"===r&&(u="anyValue");const 
p=4*Math.floor(a/4),m=a%4;let y=` if (${"sum"===r}) { sumValue += dot(values, ones); } else if (${"prod"===r}) { vec2 tmp = vec2(values[0], values[1]) * vec2(values[2], values[3]); prodValue *= tmp[0] * tmp[1]; } else { minMaxValue = ${l}(values, minMaxValue); if (${"min"===r} || ${"max"===r}) { minMaxValue = ${l}(values, minMaxValue); bvec4 isNaN = isnan(values); if (isNaN.r || isNaN.g || isNaN.b || isNaN.a) { minMaxValue = vec4(NAN); } } } `,_="vec4";"all"===r?(o="1.0",y=` bool reducedAllValue = all(values); float floatedReducedAllValue = float(reducedAllValue); allValue = float(allValue >= 1.0 && floatedReducedAllValue >= 1.0); `,_="bvec4"):"any"===r&&(o="0.0",y=` bool reducedAnyValue = any(values); float floatedReducedAnyValue = float(reducedAnyValue); anyValue = float(anyValue >= 1.0 || floatedReducedAnyValue >= 1.0); `,_="bvec4");let w="";s%a>0&&(w=` if (inIdx < 0 || inIdx >= ${s}) { return initializationValue; } `),this.userCode=` const float initializationValue = ${o}; const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0); float getValue(int batch, int inIdx) { ${w} return getX(batch, inIdx); } void main() { ivec2 coords = getOutputCoords(); int batch = coords[0]; int outIdx = coords[1]; int inOffset = outIdx * ${a}; vec4 minMaxValue = vec4(${o}); float prodValue = 1.0; float sumValue = 0.0; float allValue = 1.0; float anyValue = 0.0; for (int i = 0; i < ${p}; i += 4) { int inIdx = inOffset + i; ${_} values = ${_}( getValue(batch, inIdx), getValue(batch, inIdx + 1), getValue(batch, inIdx + 2), getValue(batch, inIdx + 3) ); ${y} } int inIdx = inOffset + ${p}; if (${1===m}) { ${_} values = ${_}( getValue(batch, inIdx), initializationValue, initializationValue, initializationValue ); ${y} } else if (${2===m}) { ${_} values = ${_}( getValue(batch, inIdx), getValue(batch, inIdx + 1), initializationValue, initializationValue ); ${y} } else if (${3===m}) { ${_} values = ${_}( getValue(batch, inIdx), getValue(batch, inIdx + 1), getValue(batch, inIdx + 2), initializationValue 
); ${y} } setOutput(${u}); } `}};function getReductionStages(t){let r=[];for(;0===r.length||1!==r[r.length-1].outSize;){let a=r.length?r[r.length-1].outSize:t[1],n=computeOptimalWindowSize(a);r.push({inSize:a,windowSize:n,outSize:Math.ceil(a/n)})}return r}function reduce(t,r,a,n){let s=getReductionStages(t.shape),i=t;for(let o=0;o6)throw Error(`Transpose for rank ${r} is not yet supported`);let a=["resRC.x","resRC.y","resRC.z","resRC.w","resRC.u","resRC.v"],n=Array(r);for(let r=0;r6)throw Error(`Packed transpose for rank ${this.rank} is not yet supported.`);const n=getCoordsDataType(this.rank),s=getVecChannels("rc",this.rank),i=Array(this.rank);for(let t=0;t`Error in matMul: inner shapes (${_}) and (${w}) of Tensors with shapes ${t.shape} and ${r.shape} and transposeA=${a} and transposeB=${n} must match.`);let P=a?[$,_,I]:[$,I,_],L=n?[F,C,w]:[F,w,C],z=kernels_Reshape_reshape({inputs:{x:t},backend:s,attrs:{shape:P}}),B=kernels_Reshape_reshape({inputs:{x:r},backend:s,attrs:{shape:L}}),G=[z,B],j=Math.max($,F),K=a?z.shape[1]:z.shape[2],H=null!=i,q=null!=o,Z="leakyrelu"===u,Q=null!=u?mapActivationToShaderProgram(u,!0):null,ee=H||q||Z||null!=Q;if((1===I||1===C)&&K>1e3&&!1===ee){let t=z,r=B;a&&(t=kernels_Transpose_transpose({inputs:{x:z},backend:s,attrs:{perm:[0,2,1]}}),G.push(t)),n&&(r=kernels_Transpose_transpose({inputs:{x:B},backend:s,attrs:{perm:[0,2,1]}}),G.push(r));let i=1!==C,o=1===C,l=t;i&&(l=kernels_Reshape_reshape({inputs:{x:t},backend:s,attrs:{shape:[j,K,1]}}),G.push(l));let u=r;o&&(u=kernels_Reshape_reshape({inputs:{x:r},backend:s,attrs:{shape:[j,1,K]}}),G.push(u));let m=kernels_Multiply_multiply({inputs:{a:l,b:u},backend:s});p=kernels_Sum_sum({inputs:{x:m},backend:s,attrs:{axis:1===C?2:1,keepDims:!0}}),G.push(m)}else{let u=upcastType(t.dtype,r.dtype),m=new MatMulPackedProgram(P,L,[j,I,C],a,n,H,Q,q,Z),y=[z,B];if(null!=i&&y.push(i),q&&y.push(o),Z){let 
t=s.makeTensorInfo([],"float32",createScalarValue(l,"float32"));y.push(t),G.push(t)}p=s.runWebGLProgram(m,y,u)}let et=kernels_Reshape_reshape({inputs:{x:p},backend:s,attrs:{shape:D}});for(let t of(G.push(p),G))s.disposeIntermediateTensorInfo(t);return et}function _FusedMatMul_fusedMatMul(t){let{inputs:r,backend:a,attrs:n}=t,{a:s,b:i,bias:o,preluActivationWeights:l}=r,{transposeA:u,transposeB:p,activation:m,leakyreluAlpha:y}=n;return batchMatMulImpl({a:s,b:i,transposeA:u,transposeB:p,backend:a,bias:o,preluActivationWeights:l,leakyreluAlpha:y,activation:m})}let hh="return abs(x);";function kernels_Abs_abs(t){let r,{inputs:a,backend:n}=t,{x:s}=a;if(n.shouldExecuteOnCPU([s])&&"complex64"!==s.dtype){let t=pV(n.texData.get(s.dataId).values);return n.makeTensorInfo(s.shape,s.dtype,t)}return r=eV.getBool("WEBGL_PACK_UNARY_OPERATIONS")?new UnaryOpPackedProgram(s.shape,hh):new UnaryOpProgram(s.shape,hh),n.runWebGLProgram(r,[s],s.dtype)}let hc=kernel_funcs_utils_unaryKernelFunc({opSnippet:p2+` if (abs(x) > 1.) 
{ return NAN; } return acos(x); `}),hd=kernel_funcs_utils_unaryKernelFunc({opSnippet:p2+` if (x < 1.0) return NAN; return log(x + sqrt(x * x - 1.0));`}),hm="return a + b;",hf=kernel_funcs_utils_binaryKernelFunc({opSnippet:hm,packedOpSnippet:hm,supportsComplex:!0,cpuKernelImpl:pi});let AddNProgram=class AddNProgram{constructor(t,r){this.outputShape=[],this.outputShape=t,this.variableNames=r.map((t,r)=>`T${r}`);const a=[];this.variableNames.forEach(t=>{a.push(`float v${t} = get${t}AtOutCoords();`)});const n=this.variableNames.map(t=>`v${t}`).join(" + ");this.userCode=` void main() { ${a.join(` `)} float result = ${n}; setOutput(result); } `}};let AddNPackedProgram=class AddNPackedProgram{constructor(t,r){this.outputShape=[],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=t,this.variableNames=r.map((t,r)=>`T${r}`);const a=[];this.variableNames.forEach(t=>{a.push(`vec4 v${t} = get${t}AtOutCoords();`)});const n=this.variableNames.map(t=>`v${t}`).join(" + ");this.userCode=` void main() { ${a.join(` `)} vec4 result = ${n}; setOutput(result); } `}};function kernels_AddN_addN(t){let{inputs:r,backend:a}=t;if(1===r.length)return kernels_Identity_identity({inputs:{x:r[0]},backend:a});if(r.length>eV.getNumber("WEBGL_MAX_TEXTURES_IN_SHADER")){let t=Math.floor(r.length/2),n=kernels_AddN_addN({inputs:r.slice(0,t),backend:a}),s=kernels_AddN_addN({inputs:r.slice(t),backend:a});return kernels_AddN_addN({inputs:[n,s],backend:a})}let n=r.map(t=>t.dtype).reduce((t,r)=>upcastType(t,r)),s=r.map(t=>t.shape),i=eV.getBool("WEBGL_PACK")?new AddNPackedProgram(r[0].shape,s):new AddNProgram(r[0].shape,s);return a.runWebGLProgram(i,r,n)}function kernels_All_all(t){let 
/**
 * WebGL kernel for Any (logical-OR reduction over the given axes).
 * Transposes the reduced axes to the innermost position when needed, flattens
 * to [batch, reduceSize], runs the "any" reduction, then reshapes the result
 * (re-inserting kept dims when keepDims is set). Intermediates are disposed.
 */
function kernels_Any_any(t) {
  const { inputs, backend, attrs } = t;
  const { x } = inputs;
  const { axis, keepDims } = attrs;
  const xRank = x.shape.length;
  const origAxes = parseAxisParam(axis, x.shape);
  let axes = origAxes;
  const permutation = getAxesPermutation(axes, xRank);
  let permutedX = x;
  if (permutation != null) {
    permutedX = kernels_Transpose_transpose({
      inputs: { x },
      backend,
      attrs: { perm: permutation },
    });
    axes = getInnerMostAxes(axes.length, xRank);
  }
  assertAxesAreInnerMostDims("any", axes, xRank);
  const [outShape, reduceShape] = computeOutAndReduceShapes(permutedX.shape, axes);
  // Flatten to 2D so the generic reducer can operate over the last dim.
  const flattened = kernels_Reshape_reshape({
    inputs: { x: permutedX },
    backend,
    attrs: { shape: [-1, sizeFromShape(reduceShape)] },
  });
  const reduced = reduce(flattened, flattened.dtype, "any", backend);
  const out = keepDims
    ? kernels_Reshape_reshape({
        inputs: { x: reduced },
        backend,
        attrs: { shape: expandShapeToKeepDim(outShape, origAxes) },
      })
    : kernels_Reshape_reshape({
        inputs: { x: reduced },
        backend,
        attrs: { shape: outShape },
      });
  backend.disposeIntermediateTensorInfo(flattened);
  backend.disposeIntermediateTensorInfo(reduced);
  if (permutation != null) {
    backend.disposeIntermediateTensorInfo(permutedX);
  }
  return out;
}
i;":"round(getBestIndicesA(batch, inOffset + i));"}; float candidate = getA(batch, inIdx); if (candidate ${"max"===r?">":"<"} bestValue) { bestValue = candidate; bestIndex = inIdx; } } setOutput(float(bestIndex)); } `}};let ArgMinMaxPackedProgram=class ArgMinMaxPackedProgram{constructor(t,r,a,n){let s,i;this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,assert(t.length>2,()=>`Packed arg${a.charAt(0).toUpperCase()+a.slice(1)} supports only inputs with rank above 2.`);const o=Math.ceil(t[t.length-1]/r);this.outputShape=t.slice(0,-1),o>1&&this.outputShape.push(o),n||this.variableNames.push("bestIndicesA");const l=this.outputShape,u=l.length,p=getCoordsDataType(u),m=getChannels("coords",u);if(1===o){const t=getCoordsDataType(i=u+1);s=` ${t} sourceLocR = ${t}(${m.join()}, 0); ++${m[u-1]}; ${t} sourceLocG = ${t}(${m.join()}, 0); ++${m[u-2]}; ${t} sourceLocA = ${t}(${m.join()}, 0); --${m[u-1]}; ${t} sourceLocB = ${t}(${m.join()}, 0); --${m[u-2]};`}else i=u,s=` ${p} sourceLocR = coords; ++${m[u-1]}; ${p} sourceLocG = coords; ++${m[u-2]}; ${p} sourceLocA = coords; --${m[u-1]}; ${p} sourceLocB = coords; --${m[u-2]};`;const y=["x","y","z","w","u","v"].slice(0,i),_="."+y[i-1],w=y.map(t=>"int "+t),I=getChannels("sourceLocR",i-1).concat("inIdx.r"),C=getChannels("sourceLocG",i-1).concat("inIdx.g"),E=getChannels("sourceLocB",i-1).concat("inIdx.b"),A=getChannels("sourceLocA",i-1).concat("inIdx.a"),$="max"===a?"greaterThan":"lessThan",F=n?"":` inIdx = round(vec4(getBestIndicesAChannel(${I.join()}), getBestIndicesAChannel(${C.join()}), getBestIndicesAChannel(${E.join()}), getBestIndicesAChannel(${A.join()})));`,D=`vec4( getAChannel(${I.join()}), hasNextCol ? getAChannel(${C.join()}) : 0., hasNextRow ? getAChannel(${E.join()}) : 0., hasNextRow && hasNextCol ? 
/**
 * One pass of the iterative (unpacked) argmin/argmax reduction.
 * `bestIndices`, when provided, carries the winning indices from the previous
 * pass and also defines the current [batch, size] problem shape. Recurses
 * until the inner dimension collapses to 1, disposing intermediate passes.
 */
function argReduce(backend, x, reduceType, bestIndices = null) {
  let batch = x.shape[0];
  let size = x.shape[1];
  if (bestIndices != null) {
    batch = bestIndices.shape[0];
    size = bestIndices.shape[1];
  }
  const windowSize = computeOptimalWindowSize(size);
  const program = new ArgMinMaxProgram(
    {
      windowSize,
      inSize: size,
      batchSize: batch,
      outSize: Math.ceil(size / windowSize),
    },
    reduceType,
    bestIndices == null,
  );
  const programInputs = bestIndices == null ? [x] : [x, bestIndices];
  const result = backend.runWebGLProgram(program, programInputs, "int32");
  if (result.shape[1] === 1) {
    return result;
  }
  const next = argReduce(backend, x, reduceType, result);
  backend.disposeIntermediateTensorInfo(result);
  return next;
}
/**
 * WebGL ArgMax kernel.
 * Transposes the reduced axis to the innermost position when necessary, then
 * delegates to the shared argmin/argmax reduction with type "max". Any
 * transpose intermediate is disposed before returning.
 */
function kernels_ArgMax_argMax(t) {
  const { inputs, backend, attrs } = t;
  const { x } = inputs;
  const { axis } = attrs;
  let axes = parseAxisParam(axis, x.shape);
  const permutation = getAxesPermutation(axes, x.shape.length);
  let input = x;
  const toDispose = [];
  if (permutation != null) {
    input = kernels_Transpose_transpose({
      inputs: { x },
      backend,
      attrs: { perm: permutation },
    });
    toDispose.push(input);
    axes = getInnerMostAxes(axes.length, input.shape.length);
  }
  assertAxesAreInnerMostDims("argMax", [axes[0]], input.shape.length);
  const out = argMinMaxReduce(backend, input, axes[0], "max");
  toDispose.forEach((info) => backend.disposeIntermediateTensorInfo(info));
  return out;
}
{ return NAN; } return asin(x); `}),hy=kernel_funcs_utils_unaryKernelFunc({opSnippet:p2+"return log(x + sqrt(x * x + 1.0));"}),hb=kernel_funcs_utils_unaryKernelFunc({opSnippet:p2+` return atan(x); `}),hx=kernel_funcs_utils_binaryKernelFunc({opSnippet:ha+` return atan(a, b); `,packedOpSnippet:` vec4 result = atan(a, b); bvec4 isNaNA = isnan(a); bvec4 isNaNB = isnan(b); bvec4 isNaN = bvec4(isNaNA.x || isNaNB.x, isNaNA.y || isNaNB.y, isNaNA.z || isNaNB.z, isNaNA.w || isNaNB.w); `+hn+` return result; `}),hv=kernel_funcs_utils_unaryKernelFunc({opSnippet:p2+` if ((x < -1.0) || (x > 1.0)) return NAN; return (log(1.0 + x) - log(1.0 - x)) / 2.0;`});let Pool2DProgram=class Pool2DProgram{constructor(t,r,a,n=!1,s=!1){if(this.variableNames=["x"],"avg"===r&&a)throw Error("Cannot compute positions for average pool.");const i=t.filterWidth,o=t.strideHeight,l=t.strideWidth,u=t.dilationHeight,p=t.dilationWidth,m=t.effectiveFilterHeight,y=t.effectiveFilterWidth,_=t.padInfo.top,w=t.padInfo.left;this.outputShape=t.outShape;const I="avg"===r,C=`((batch * ${t.inHeight} + xR) * ${t.inWidth} + xC) * ${t.inChannels} + d`,E=`(xR * ${t.inWidth} + xC) * ${t.inChannels} + d`;let A="0.0";if(I||(A="-1.0 / 1e-20"),a){this.userCode=` const ivec2 strides = ivec2(${o}, ${l}); const ivec2 pads = ivec2(${_}, ${w}); void main() { ivec4 coords = getOutputCoords(); int batch = coords[0]; int d = coords[3]; ivec2 xRCCorner = coords.yz * strides - pads; int xRCorner = xRCCorner.x; int xCCorner = xRCCorner.y; // max/min x(?, ?, d) to get y(yR, yC, d). // ? = to be determined float minMaxValue = 0.0; float minMaxValueFound = 0.0; int minMaxPosition = 0; float avgValue = 0.0; for (int wR = 0; wR < ${m}; wR += ${u}) { int xR = xRCorner + wR; if (xR < 0 || xR >= ${t.inHeight}) { continue; } for (int wC = 0; wC < ${y}; wC += ${p}) { int xC = xCCorner + wC; if (xC < 0 || xC >= ${t.inWidth}) { continue; } float value = getX(batch, xR, xC, d); // If a min / max value has already been found, use it. 
If not, // use the current value. float currMinMaxValue = mix( value, minMaxValue, minMaxValueFound); if (value >= currMinMaxValue) { minMaxValue = value; minMaxValueFound = 1.0; minMaxPosition = ${n?s?C:E:`wR * ${y} + wC`}; } } } setOutput(float(minMaxPosition)); } `;return}let $=`${r}(${r}(${r}(minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])`;"avg"===r&&($="avgValue / max(count, 1.0)");const F=4*Math.floor(i/4),D=i%4,P=` if (${I}) { avgValue += dot(values, ones); } else { minMaxValue = max(values, minMaxValue); } `;this.userCode=` const ivec2 strides = ivec2(${o}, ${l}); const ivec2 pads = ivec2(${_}, ${w}); const float initializationValue = ${A}; const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0); float count = 0.0; float getValue(int batch, int xR, int xC, int d) { if (xC < 0 || xC >= ${t.inWidth}) { return initializationValue; } count += 1.0; return getX(batch, xR, xC, d); } void main() { ivec4 coords = getOutputCoords(); int batch = coords[0]; int d = coords[3]; ivec2 xRCCorner = coords.yz * strides - pads; int xRCorner = xRCCorner.x; int xCCorner = xRCCorner.y; // max/min x(?, ?, d) to get y(yR, yC, d). // ? 
= to be determined vec4 minMaxValue = vec4(${A}); float avgValue = 0.0; count = 0.0; for (int wR = 0; wR < ${m}; wR += ${u}) { int xR = xRCorner + wR; if (xR < 0 || xR >= ${t.inHeight}) { continue; } for (int wC = 0; wC < ${F}; wC += 4) { int xC = xCCorner + wC * ${p}; vec4 values = vec4( getValue(batch, xR, xC, d), getValue(batch, xR, xC + ${p}, d), getValue(batch, xR, xC + 2 * ${p}, d), getValue(batch, xR, xC + 3 * ${p}, d) ); ${P} } int xC = xCCorner + ${F}; if (${1===D}) { vec4 values = vec4( getValue(batch, xR, xC, d), initializationValue, initializationValue, initializationValue ); ${P} } else if (${2===D}) { vec4 values = vec4( getValue(batch, xR, xC, d), getValue(batch, xR, xC + ${p}, d), initializationValue, initializationValue ); ${P} } else if (${3===D}) { vec4 values = vec4( getValue(batch, xR, xC, d), getValue(batch, xR, xC + ${p}, d), getValue(batch, xR, xC + 2 * ${p}, d), initializationValue ); ${P} } } setOutput(${$}); } `}};let Pool3DProgram=class Pool3DProgram{constructor(t,r,a,n=!1,s=!1){if(this.variableNames=["x"],"avg"===r&&a)throw Error("Cannot compute positions for average pool.");const i=t.filterWidth,o=t.strideDepth,l=t.strideHeight,u=t.strideWidth,p=t.dilationDepth,m=t.dilationHeight,y=t.dilationWidth,_=t.effectiveFilterDepth,w=t.effectiveFilterHeight,I=t.effectiveFilterWidth,C=t.padInfo.front,E=t.padInfo.top,A=t.padInfo.left;this.outputShape=t.outShape;const $="avg"===r;let F="0.0";if($||(F="-1.0 / 1e-20"),a){this.userCode=` const ivec3 strides = ivec3(${o}, ${l}, ${u}); const ivec3 pads = ivec3(${C}, ${E}, ${A}); void main() { ivec5 coords = getOutputCoords(); int batch = coords.x; int ch = coords.u; ivec3 xCorner = ivec3(coords.y, coords.z, coords.w) * strides - pads; int xDCorner = xCorner.x; int xRCorner = xCorner.y; int xCCorner = xCorner.z; // max/min x(?, ?, ?, ch) to get y(yD, yR, yC, ch). // ? 
= to be determined float minMaxValue = 0.0; float minMaxValueFound = 0.0; int minMaxPosition = 0; for (int wD = 0; wD < ${_}; wD += ${p}) { int xD = xDCorner + wD; if (xD < 0 || xD >= ${t.inDepth}) { continue; } for (int wR = 0; wR < ${w}; wR += ${m}) { int xR = xRCorner + wR; if (xR < 0 || xR >= ${t.inHeight}) { continue; } for (int wC = 0; wC < ${I}; wC += ${y}) { int xC = xCCorner + wC; if (xC < 0 || xC >= ${t.inWidth}) { continue; } float value = getX(batch, xD, xR, xC, ch); // If a min / max value has already been found, use it. If not, // use the current value. float currMinMaxValue = mix( value, minMaxValue, minMaxValueFound); if (value >= currMinMaxValue) { minMaxValue = value; minMaxValueFound = 1.0; minMaxPosition = ${n?s?`(((batch * ${t.inDepth} + xD) * ${t.inHeight} + xR) * ${t.inWidth} + xC) * ${t.inChannels} + ch`:`((xD * ${t.inHeight} + xR) * ${t.inWidth} + xC) * ${t.inChannels} + ch`:`wD * ${w} * ${I} + wR * ${I} + wC`}; } } } } setOutput(float(minMaxPosition)); } `;return}let D=`${r}(${r}(${r}(minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])`;"avg"===r&&(D="avgValue / max(count, 1.0)");const P=4*Math.floor(i/4),L=i%4,z=` if (${$}) { avgValue += dot(values, ones); } else { minMaxValue = max(values, minMaxValue); } `;this.userCode=` const ivec3 strides = ivec3(${o}, ${l}, ${u}); const ivec3 pads = ivec3(${C}, ${E}, ${A}); const float initializationValue = ${F}; const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0); float count = 0.0; float getValue(int batch, int xD, int xR, int xC, int ch) { if (xC < 0 || xC >= ${t.inWidth}) { return initializationValue; } count += 1.0; return getX(batch, xD, xR, xC, ch); } void main() { ivec5 coords = getOutputCoords(); int batch = coords.x; int ch = coords.u; ivec3 xCorner = ivec3(coords.y, coords.z, coords.w) * strides - pads; int xDCorner = xCorner.x; int xRCorner = xCorner.y; int xCCorner = xCorner.z; // max/min x(?, ?, ?, d) to get y(yD, yR, yC, ch). // ? 
= to be determined vec4 minMaxValue = vec4(${F}); float avgValue = 0.0; count = 0.0; for (int wD = 0; wD < ${_}; wD += ${p}) { int xD = xDCorner + wD; if (xD < 0 || xD >= ${t.inDepth}) { continue; } for (int wR = 0; wR < ${w}; wR += ${m}) { int xR = xRCorner + wR; if (xR < 0 || xR >= ${t.inHeight}) { continue; } for (int wC = 0; wC < ${P}; wC += 4) { int xC = xCCorner + wC * ${y}; vec4 values = vec4( getValue(batch, xD, xR, xC, ch), getValue(batch, xD, xR, xC + ${y}, ch), getValue(batch, xD, xR, xC + 2 * ${y}, ch), getValue(batch, xD, xR, xC + 3 * ${y}, ch) ); ${z} } int xC = xCCorner + ${P}; if (${1===L}) { vec4 values = vec4( getValue(batch, xD, xR, xC, ch), initializationValue, initializationValue, initializationValue ); ${z} } else if (${2===L}) { vec4 values = vec4( getValue(batch, xD, xR, xC, ch), getValue(batch, xD, xR, xC + ${y}, ch), initializationValue, initializationValue ); ${z} } else if (${3===L}) { vec4 values = vec4( getValue(batch, xD, xR, xC, ch), getValue(batch, xD, xR, xC + ${y}, ch), getValue(batch, xD, xR, xC + 2 * ${y}, ch), initializationValue ); ${z} } } } setOutput(${D}); } `}};function kernels_AvgPool_avgPool(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r;webgl_util_assertNotComplex(s,"avgPool");let{filterSize:i,strides:o,pad:l,dimRoundingMode:u}=n;assert(eitherStridesOrDilationsAreOne(o,1),()=>`Error in avgPool: Either strides or dilations must be 1. 
/**
 * WebGL AvgPool3D kernel.
 * Builds 3D pooling info (dilations fixed at [1, 1, 1] for average pooling)
 * and runs the average-pool shader; the output dtype is always float32.
 */
function AvgPool3D_avgPool3D(t) {
  const { inputs, backend, attrs } = t;
  const { x } = inputs;
  const { filterSize, strides, pad, dimRoundingMode, dataFormat } = attrs;
  const convInfo = computePool3DInfo(
    x.shape, filterSize, strides, [1, 1, 1], pad, dimRoundingMode, dataFormat);
  const program = new Pool3DProgram(convInfo, "avg", false);
  return backend.runWebGLProgram(program, [x], "float32");
}
float dotProd = 0.0; for (int wR = 0; wR < ${l}; wR += ${i}) { float dyR = float(dyRCorner + wR) / ${n}.0; if (dyR < 0.0 || dyR >= ${t.outHeight}.0 || fract(dyR) > 0.0) { continue; } int idyR = int(dyR); for (int wC = 0; wC < ${u}; wC+= ${o}) { float dyC = float(dyCCorner + wC) / ${s}.0; if (dyC < 0.0 || dyC >= ${t.outWidth}.0 || fract(dyC) > 0.0) { continue; } int idyC = int(dyC); float dyValue = getDy(b, idyR, idyC, d); dotProd += dyValue * avgMultiplier; } } setOutput(dotProd); } `}};let AvgPool3DBackpropProgram=class AvgPool3DBackpropProgram{constructor(t){this.variableNames=["dy"],this.outputShape=t.inShape;const r=t.filterDepth,a=t.filterHeight,n=t.filterWidth,s=t.strideDepth,i=t.strideHeight,o=t.strideWidth,l=t.dilationDepth,u=t.dilationHeight,p=t.dilationWidth,m=t.effectiveFilterDepth,y=t.effectiveFilterHeight,_=t.effectiveFilterWidth,w=m-1-t.padInfo.front,I=y-1-t.padInfo.top,C=_-1-t.padInfo.left;this.userCode=` const ivec3 pads = ivec3(${w}, ${I}, ${C}); const float avgMultiplier = float(${1/(r*a*n)}); void main() { ivec5 coords = getOutputCoords(); int batch = coords.x; int ch = coords.u; ivec3 dyCorner = ivec3(coords.y, coords.z, coords.w) - pads; int dyDCorner = dyCorner.x; int dyRCorner = dyCorner.y; int dyCCorner = dyCorner.z; // Convolve dy(?, ?, ?, d) with pos mask(:, :, :, ch) to get // dx(xD, xR, xC, ch). // ? = to be determined. : = across all values in that axis. 
/**
 * WebGL gradient kernel for AvgPool.
 * Rejects complex inputs, then convolves `dy` with the average-pool backprop
 * shader to produce dx with the original input's dtype.
 */
function kernels_AvgPoolGrad_avgPoolGrad(t) {
  const { inputs, backend, attrs } = t;
  const { dy, input } = inputs;
  webgl_util_assertNotComplex([dy, input], "avgPoolGrad");
  const { filterSize, strides, pad } = attrs;
  // Dilation is fixed at 1 for average pooling.
  const convInfo = computePool2DInfo(input.shape, filterSize, strides, 1, pad);
  const program = new AvgPool2DBackpropProgram(convInfo);
  return backend.runWebGLProgram(program, [dy], input.dtype);
}
/**
 * Builds the GLSL source-coordinate expression for the slice shader.
 * Rank 1 uses the bare scalar; ranks 2..6 expand to a comma-separated list of
 * components of `sourceLoc` (named via the shared channel table h_).
 * Throws for ranks above 6, which the shader does not support.
 */
function getCoords(rank) {
  if (rank === 1) {
    return "sourceLoc";
  }
  if (rank > 6) {
    throw Error(`Slicing for rank ${rank} is not yet supported`);
  }
  const channels = h_.slice(0, rank);
  return channels.map((c) => "sourceLoc." + c).join(",");
}
// Creates a zero-copy view of tensor `t` starting at coordinate `r` with
// shape `a`, on backend `n`. The returned TensorInfo aliases the source's
// texture data; only a flat element offset into the original buffer is
// recorded, and the original buffer's ref count is bumped so it stays alive
// while the view exists.
function shallowSlice(t,r,a,n){
  // Source texture data, a fresh TensorInfo for the view, and its texData entry.
  let s=n.texData.get(t.dataId),i=n.makeTensorInfo(a,t.dtype),o=n.texData.get(i.dataId);
  // Alias the new entry onto the source's data, then overwrite the fields
  // that must describe the view (its own ref count, shape, dtype).
  Object.assign(o,s),o.refCount=1,o.shape=a,o.dtype=t.dtype;
  // Flat element offset of the slice start within the source. If the source
  // is itself a slice, offsets chain back to the original buffer, and the
  // view points at that original data id rather than the intermediate slice.
  let l=computeFlatOffset(r,computeStrides(t.shape));
  s.slice&&(l+=s.slice.flatOffset),o.slice={flatOffset:l,origDataId:s.slice&&s.slice.origDataId||t.dataId};
  // Increment the share count of the original backing data so the backend
  // does not release it while this view is alive.
  let u=n.dataRefCount.get(o.slice.origDataId)||1;
  return n.dataRefCount.set(o.slice.origDataId,u+1),i
}
/**
 * Casts a tensor to int32 on the GPU by running a truncating unary shader,
 * and returns a plain TensorInfo view of the result.
 */
function int_int(t, r) {
  const program = new UnaryOpProgram(t.shape, "return float(int(x));");
  const out = r.runWebGLProgram(program, [t], "int32");
  return { dataId: out.dataId, shape: out.shape, dtype: out.dtype };
}
/**
 * WebGL ClipByValue kernel.
 * Chooses the packed or unpacked clip shader based on the WEBGL_PACK_CLIP
 * flag and passes the min/max bounds as custom uniforms.
 */
function kernels_ClipByValue_clipByValue(t) {
  const { inputs, backend, attrs } = t;
  const { x } = inputs;
  const { clipValueMin, clipValueMax } = attrs;
  const program = eV.getBool("WEBGL_PACK_CLIP")
    ? new ClipPackedProgram(x.shape)
    : new ClipProgram(x.shape);
  // The trailing argument supplies the [minVal], [maxVal] uniform values.
  return backend.runWebGLProgram(program, [x], x.dtype, [[clipValueMin], [clipValueMax]]);
}
/**
 * Renders `channels` as a comma-separated list, rewriting the single entry
 * equal to `channel` as "<channel> - <shift>". Used when building GLSL
 * coordinate expressions for the packed concat shader. If `channel` is not
 * present, the list is returned unmodified.
 */
function shiftedChannels(channels, channel, shift) {
  const target = channels.indexOf(channel);
  const parts = [];
  for (let i = 0; i < channels.length; i++) {
    parts.push(i === target ? `${channels[i]} - ${shift}` : channels[i]);
  }
  return parts.join();
}
/**
 * WebGL Concat kernel.
 * Normalizes the axis, validates that all input shapes are consistent,
 * short-circuits a fully-empty output and a single non-empty input, and
 * otherwise delegates to the shared concat implementation.
 */
function kernels_Concat_concat(t) {
  const { inputs, backend, attrs } = t;
  const { axis } = attrs;
  const normalizedAxis = parseAxisParam(axis, inputs[0].shape)[0];
  assertParamsConsistent(inputs.map((x) => x.shape), normalizedAxis);
  const outShape = concat_util_computeOutShape(inputs.map((x) => x.shape), normalizedAxis);
  // Every input is empty: materialize an empty tensor directly.
  if (sizeFromShape(outShape) === 0) {
    return backend.makeTensorInfo(outShape, inputs[0].dtype, []);
  }
  // Zero-sized inputs contribute nothing and are dropped up front.
  const nonEmpty = inputs.filter((x) => sizeFromShape(x.shape) > 0);
  if (nonEmpty.length === 1) {
    return kernels_Identity_identity({ inputs: { x: nonEmpty[0] }, backend });
  }
  return Concat_impl_concatImpl(nonEmpty, normalizedAxis, backend);
}
float dotProd = 0.0; for (int wR = 0; wR < ${y}; wR++) { int xR = xRCorner + wR * ${p}; if (xR < 0 || xR >= ${t.inHeight}) { continue; } for (int wC = 0; wC < ${_}; wC++) { int xC = xCCorner + wC * ${m}; if (xC < 0 || xC >= ${t.inWidth}) { continue; } for (int d1 = 0; d1 < ${w}; d1 += 4) { vec4 wValues = vec4( getW(wR, wC, d1, d2), getW(wR, wC, d1 + 1, d2), getW(wR, wC, d1 + 2, d2), getW(wR, wC, d1 + 3, d2) ); if (${C}) { vec4 xValues = vec4( getX(batch, xR, xC, d1), getX(batch, xR, xC, d1 + 1), getX(batch, xR, xC, d1 + 2), getX(batch, xR, xC, d1 + 3) ); dotProd += dot(xValues, wValues); } else { vec4 xValues = vec4( getX(batch, d1, xR, xC), getX(batch, d1 + 1, xR, xC), getX(batch, d1 + 2, xR, xC), getX(batch, d1 + 3, xR, xC) ); dotProd += dot(xValues, wValues); } } if (${1===I}) { if (${C}) { dotProd += getX(batch, xR, xC, ${w}) * getW(wR, wC, ${w}, d2); } else { dotProd += getX(batch, ${w}, xR, xC) * getW(wR, wC, ${w}, d2); } } else if (${2===I}) { vec2 wValues = vec2( getW(wR, wC, ${w}, d2), getW(wR, wC, ${w} + 1, d2) ); if (${C}) { vec2 xValues = vec2( getX(batch, xR, xC, ${w}), getX(batch, xR, xC, ${w} + 1) ); dotProd += dot(xValues, wValues); } else { vec2 xValues = vec2( getX(batch, ${w}, xR, xC), getX(batch, ${w} + 1, xR, xC) ); dotProd += dot(xValues, wValues); } } else if (${3===I}) { vec3 wValues = vec3( getW(wR, wC, ${w}, d2), getW(wR, wC, ${w} + 1, d2), getW(wR, wC, ${w} + 2, d2) ); if (${C}) { vec3 xValues = vec3( getX(batch, xR, xC, ${w}), getX(batch, xR, xC, ${w} + 1), getX(batch, xR, xC, ${w} + 2) ); dotProd += dot(xValues, wValues); } else { vec3 xValues = vec3( getX(batch, ${w}, xR, xC), getX(batch, ${w} + 1, xR, xC), getX(batch, ${w} + 2, xR, xC) ); dotProd += dot(xValues, wValues); } } } } float result = dotProd; ${r?"result += getBiasAtOutCoords();":""} ${A} setOutput(result); } `}};let Conv3DProgram=class Conv3DProgram{constructor(t){this.variableNames=["x","W"],this.outputShape=t.outShape;const 
r=t.padInfo.front,a=t.padInfo.top,n=t.padInfo.left,s=t.strideDepth,i=t.strideHeight,o=t.strideWidth,l=t.dilationDepth,u=t.dilationHeight,p=t.dilationWidth,m=t.filterDepth,y=t.filterHeight,_=t.filterWidth,w=4*Math.floor(t.inChannels/4),I=t.inChannels%4;this.userCode=` const ivec3 strides = ivec3(${s}, ${i}, ${o}); const ivec3 pads = ivec3(${r}, ${a}, ${n}); void main() { ivec5 coords = getOutputCoords(); int batch = coords.x; int d2 = coords.u; ivec3 xFRCCorner = ivec3(coords.y, coords.z, coords.w) * strides - pads; int xFCorner = xFRCCorner.x; int xRCorner = xFRCCorner.y; int xCCorner = xFRCCorner.z; // Convolve x(?, ?, ?, d1) with w(:, :, :, d1, d2) to get // y(yF, yR, yC, d2). ? = to be determined. : = across all // values in that axis. float dotProd = 0.0; for (int wF = 0; wF < ${m}; wF++) { int xF = xFCorner + wF * ${l}; if (xF < 0 || xF >= ${t.inDepth}) { continue; } for (int wR = 0; wR < ${y}; wR++) { int xR = xRCorner + wR * ${u}; if (xR < 0 || xR >= ${t.inHeight}) { continue; } for (int wC = 0; wC < ${_}; wC++) { int xC = xCCorner + wC * ${p}; if (xC < 0 || xC >= ${t.inWidth}) { continue; } for (int d1 = 0; d1 < ${w}; d1 += 4) { vec4 xValues = vec4( getX(batch, xF, xR, xC, d1), getX(batch, xF, xR, xC, d1 + 1), getX(batch, xF, xR, xC, d1 + 2), getX(batch, xF, xR, xC, d1 + 3) ); vec4 wValues = vec4( getW(wF, wR, wC, d1, d2), getW(wF, wR, wC, d1 + 1, d2), getW(wF, wR, wC, d1 + 2, d2), getW(wF, wR, wC, d1 + 3, d2) ); dotProd += dot(xValues, wValues); } if (${1===I}) { dotProd += getX(batch, xF, xR, xC, ${w}) * getW(wF, wR, wC, ${w}, d2); } else if (${2===I}) { vec2 xValues = vec2( getX(batch, xF, xR, xC, ${w}), getX(batch, xF, xR, xC, ${w} + 1) ); vec2 wValues = vec2( getW(wF, wR, wC, ${w}, d2), getW(wF, wR, wC, ${w} + 1, d2) ); dotProd += dot(xValues, wValues); } else if (${3===I}) { vec3 xValues = vec3( getX(batch, xF, xR, xC, ${w}), getX(batch, xF, xR, xC, ${w} + 1), getX(batch, xF, xR, xC, ${w} + 2) ); vec3 wValues = vec3( getW(wF, wR, wC, ${w}, d2), 
getW(wF, wR, wC, ${w} + 1, d2), getW(wF, wR, wC, ${w} + 2, d2) ); dotProd += dot(xValues, wValues); } } } } setOutput(dotProd); } `}};let Conv2DPackedProgram=class Conv2DPackedProgram{constructor(t,r=!1,a=null,n=!1,s=!1){this.variableNames=["x","W"],this.packedInputs=!0,this.packedOutput=!0,this.customUniforms=[{name:"pads",type:"ivec2"},{name:"strides",type:"ivec2"},{name:"dilations",type:"ivec2"},{name:"inDims",type:"ivec2"}],this.outputShape=t.outShape,this.enableShapeUniforms=useShapeUniforms(this.outputShape.length);const i=t.padInfo.left,o=t.strideWidth,l=t.dilationWidth,u=t.filterHeight,p=t.filterWidth;let m=` int xR; int xC; int xCOffset; vec4 wTexel; vec4 previous; vec4 final;`;for(let t=0;t=0 && xR < inDims[0]) { `;for(let r=0;r<(p+1)/2;r++){const a=2*r;if(m+=` xC = xCCorner + ${a*l}; `,1===o){if(a= 0 && xCOffset < inDims[1] && xTexelC${a}Ready == 0) { xTexelC${a} = getX(batch, xR, xCOffset, d1); // Need to manually clear unused channels in case // we're reading from recycled texture. if (xCOffset + 1 >= inDims[1]) { xTexelC${a}.zw = vec2(0.0); } xTexelC${a}Ready = 1; } `,1===l&&a>0?m+=` xC${a} = vec4(xTexelC${a-2}.zw, xTexelC${a}.xy); `:m+=` xCOffset = xC + 1 - 2; if (xCOffset >= 0 && xCOffset < inDims[1]) { previous = getX(batch, xR, xCOffset, d1); // Need to manually clear unused channels in case // we're reading from recycled texture. if (xCOffset + 1 >= inDims[1]) { previous.zw = vec2(0.0); } xC${a} = vec4(previous.zw, xTexelC${a}.xy); } else { xC${a} = vec4(0.0, 0.0, xTexelC${a}.xy); } `):m+=` if (xC >= 0 && xC < inDims[1] && xTexelC${a}Ready == 0) { xTexelC${a} = getX(batch, xR, xC, d1); if (xC + 1 >= inDims[1]) { xTexelC${a}.zw = vec2(0.0); } xTexelC${a}Ready = 1; } xC${a} = xTexelC${a}; `,a+1= 0 && xCOffset < inDims[1] && xTexelC${a+1}Ready == 0) { xTexelC${a+1} = getX(batch, xR, xCOffset, d1); // Need to manually clear unused channels in case // we're reading from recycled texture. 
if (xCOffset + 1 >= inDims[1]) { xTexelC${a+1}.zw = vec2(0.0); } xTexelC${a+1}Ready = 1; } `,l>1?m+=` xCOffset -= 2; if (xCOffset >= 0 && xCOffset < inDims[1]) { previous = getX(batch, xR, xCOffset, d1); xC${a+1} = vec4(previous.zw, xTexelC${a+1}.xy); } else { xC${a+1} = vec4(0.0, 0.0, xTexelC${a+1}.xy); } `:m+=` xC${a+1} = vec4(xTexelC${a}.zw, xTexelC${a+1}.xy); `):1===t?m+=` xC${a+1} = xTexelC${a}; `:m+=` xCOffset = xC + ${t}; if (xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${a+1}Ready == 0) { xTexelC${a+1} = getX(batch, xR, xCOffset, d1); if (xCOffset + 1 >= inDims[1]) { xTexelC${a+1}.zw = vec2(0.0); } xTexelC${a+1}Ready = 1; } xC${a+1} = xTexelC${a+1}; `}}else a= 0 && xCOffset < inDims[1] && xTexelC${a}Ready == 0) { xTexelC${a} = getX(batch, xR, xCOffset, d1); // Need to manually clear unused channels in case // we're reading from recycled texture. if (xCOffset + 1 >= inDims[1]) { xTexelC${a}.zw = vec2(0.0); } xTexelC${a}Ready = 1; } if(xC + 1 >= 0 && xC + 1 < inDims[1] && xTexelC${a+1}Ready == 0) { xTexelC${a+1} = getX(batch, xR, xC + 1, d1); // Need to manually clear unused channels in case // we're reading from recycled texture. if (xC + 2 >= inDims[1]) { xTexelC${a+1}.zw = vec2(0.0); } xTexelC${a+1}Ready = 1; } xC${a} = vec4(xTexelC${a}.zw, xTexelC${a+1}.zw); `,a+1= 0 && xCOffset < inDims[1]) { final = getX(batch, xR, xCOffset, d1); } xC${a+1} = vec4(xTexelC${a+1}.xy, final.xy); `)):(m+=` if(xC >= 0 && xC < inDims[1] && xTexelC${a}Ready == 0) { xTexelC${a} = getX(batch, xR, xC, d1); if (xC + 1 >= inDims[1]) { xTexelC${a}.zw = vec2(0.0); } xTexelC${a}Ready = 1; } xCOffset = xC + strides[1]; if(xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${a+1}Ready == 0) { xTexelC${a+1} = getX(batch, xR, xCOffset, d1); if (xCOffset + 1 >= inDims[1]) { xTexelC${a+1}.zw = vec2(0.); } xTexelC${a+1}Ready = 1; } xC${a} = vec4( xTexelC${a}.xy, xTexelC${a+1}.xy); `,a+1= 0) { // Use custom imod instead mod. On Intel GPU, mod may generate // unexpected value. 
// https://github.com/tensorflow/tfjs/issues/5447 offsetX = imod(blockIndex, outWidth) * stride[1] - pad[1]; d1 = offsetX + dilation[1] * (imod(pos, itemsPerBlockRow) / inChannels); if(d1 < inputShape[${o}] && d1 >= 0) { ch = imod(pos, inChannels); if (${s}) { innerDims = vec2(d1, ch); result[${2*t+r}] = getChannel( getA(rc.x, d0, int(innerDims.x), int(innerDims.y)), innerDims); } else { innerDims = vec2(d0, d1); result[${2*t+r}] = getChannel( getA(rc.x, ch, int(innerDims.x), int(innerDims.y)), innerDims); } } } } `;this.userCode=` void main() { ivec3 rc = getOutputCoords(); vec4 result = vec4(0); int blockIndex, pos, offsetY, d0, offsetX, d1, ch; vec2 innerDims; ${u} ${n.output} = result; } `}};function getShapeForBatchMatMul(t,r){let a=t.length;return a>=3?r?[...t.slice(0,-3),t[a-3]*t[a-2],t[a-1]]:[...t.slice(0,-3),t[a-3],t[a-2]*t[a-1]]:!r&&1===a&&t[0]>1?[t[0],1]:null}function conv2dByMatMul({x:t,filter:r,convInfo:a,backend:n,bias:s=null,preluActivationWeights:i=null,leakyreluAlpha:o=0,activation:l=null}){let u,p=t.shape,m=n.texData.get(t.dataId),y=a.inChannels,_=p[0]*p[1]*p[2],w=a.outChannels,I="channelsLast"===a.dataFormat,C=[];if(null!=i){let t=getShapeForBatchMatMul(i.shape,I);null!=t&&(i=kernels_Reshape_reshape({inputs:{x:i},backend:n,attrs:{shape:t}}),C.push(i))}if(null!=s){let t=getShapeForBatchMatMul(s.shape,I);null!=t&&(s=kernels_Reshape_reshape({inputs:{x:s},backend:n,attrs:{shape:t}}),C.push(s))}if(!((1===_||1===w)&&y>1e3)&&m.isPacked&&I&&null!=m.texture&&p[2]%2!=0&&arraysEqual(m.shape.slice(-3),p.slice(-3))){let y=p[0]*p[1]*(p[2]+1),_={dataId:t.dataId,shape:[1,y,a.inChannels],dtype:t.dtype},w=m.shape;m.shape=m.shape.slice(),m.shape[m.shape.length-2]++,assert(isReshapeFree(m.shape,_.shape),()=>`packed reshape ${m.shape} to ${_.shape} isn't free`);let I=kernels_Reshape_reshape({inputs:{x:r},backend:n,attrs:{shape:[1,a.inChannels,a.outChannels]}});C.push(I);let 
E=batchMatMulImpl({a:_,b:I,backend:n,transposeA:!1,transposeB:!1,bias:s,activation:l,preluActivationWeights:i,leakyreluAlpha:o}),A=n.texData.get(E.dataId);assert(A.isPacked,()=>"batchMatMul result is expected to be packed"),m.shape=w,A.shape=a.outShape,(u=kernels_Identity_identity({inputs:{x:E},backend:n})).shape=a.outShape,C.push(E)}else{let p=a.outHeight*a.outWidth,m=kernels_Reshape_reshape({inputs:{x:t},backend:n,attrs:{shape:I?[a.batchSize,p,a.inChannels]:[a.batchSize,a.inChannels,p]}}),y=kernels_Reshape_reshape({inputs:{x:r},backend:n,attrs:{shape:[1,a.inChannels,a.outChannels]}}),_=batchMatMulImpl({a:I?m:y,b:I?y:m,transposeA:!I,transposeB:!1,backend:n,bias:s,activation:l,preluActivationWeights:i,leakyreluAlpha:o});u=kernels_Reshape_reshape({inputs:{x:_},backend:n,attrs:{shape:a.outShape}}),C.push(m),C.push(y),C.push(_)}for(let t of C)n.disposeIntermediateTensorInfo(t);return u}function conv2dWithIm2Row({x:t,filter:r,convInfo:a,backend:n,bias:s=null,preluActivationWeights:i=null,leakyreluAlpha:o=0,activation:l=null}){let{filterWidth:u,filterHeight:p,inChannels:m,outWidth:y,outHeight:_,dataFormat:w}=a,I="channelsLast"===w,C=u*p*m,E=_*y,A=[a.batchSize,C,E],$=[];if(null!=i){let t=getShapeForBatchMatMul(i.shape,I);null!=t&&(i=kernels_Reshape_reshape({inputs:{x:i},backend:n,attrs:{shape:t}}),$.push(i))}if(null!=s){let t=getShapeForBatchMatMul(s.shape,I);null!=t&&(s=kernels_Reshape_reshape({inputs:{x:s},backend:n,attrs:{shape:t}}),$.push(s))}let F=kernels_Reshape_reshape({inputs:{x:r},backend:n,attrs:{shape:[1,C,sizeFromShape(r.shape)/C]}});$.push(F);let D=new Im2ColPackedProgram(A,a),P=[t.shape,[a.padInfo.top,a.padInfo.left],[a.strideHeight,a.strideWidth],[a.dilationHeight,a.dilationWidth],[a.inChannels],[a.filterWidth*a.inChannels],[a.outWidth]],L=n.runWebGLProgram(D,[t],"float32",P),z=kernels_Reshape_reshape({inputs:{x:L},backend:n,attrs:{shape:A}});$.push(L),$.push(z);let B=null!=s,G=null!=i,j="leakyrelu"===l,K=l?mapActivationToShaderProgram(l,!0):null,H=new 
MatMulPackedProgram(I?z.shape:F.shape,I?F.shape:z.shape,I?[a.batchSize,E,a.outChannels]:[a.batchSize,a.outChannels,E],!0,!1,B,K,G,j),q=I?[z,F]:[F,z];if(s&&q.push(s),G&&q.push(i),j){let t=n.makeTensorInfo([],"float32",createScalarValue(o,"float32"));q.push(t),$.push(t)}let Z=n.runWebGLProgram(H,q,"float32"),Q=kernels_Reshape_reshape({inputs:{x:Z},backend:n,attrs:{shape:a.outShape}});for(let t of($.push(Z),$))n.disposeIntermediateTensorInfo(t);return Q}function Conv2D_conv2d(t){let r,{inputs:a,backend:n,attrs:s}=t,{x:i,filter:o}=a,{strides:l,pad:u,dataFormat:p,dilations:m,dimRoundingMode:y}=s,_=convertConv2DDataFormat(p),w=computeConv2DInfo(i.shape,o.shape,l,m,u,y,!1,_);if(1===w.filterHeight&&1===w.filterWidth&&1===w.dilationHeight&&1===w.dilationWidth&&1===w.strideHeight&&1===w.strideWidth&&("SAME"===w.padInfo.type||"VALID"===w.padInfo.type))r=conv2dByMatMul({x:i,filter:o,convInfo:w,backend:n});else if(w.strideWidth<=2&&"channelsLast"===_&&eV.getBool("WEBGL_EXP_CONV")){let t=new Conv2DPackedProgram(w),a=[[w.padInfo.top,w.padInfo.left],[w.strideHeight,w.strideWidth],[w.dilationHeight,w.dilationWidth],[w.inHeight,w.inWidth]];r=n.runWebGLProgram(t,[i,o],"float32",a)}else if(eV.getBool("WEBGL_CONV_IM2COL"))r=conv2dWithIm2Row({x:i,filter:o,convInfo:w,backend:n});else{let t=new Conv2DProgram(w);r=n.runWebGLProgram(t,[i,o],"float32")}let I=kernels_Reshape_reshape({inputs:{x:r},backend:n,attrs:{shape:w.outShape}});return n.disposeIntermediateTensorInfo(r),I}let Conv2DDerFilterProgram=class Conv2DDerFilterProgram{constructor(t){this.variableNames=["x","dy"],this.outputShape=t.filterShape;const r=t.strideHeight,a=t.strideWidth,n=t.padInfo.top,s=t.padInfo.left,i="channelsLast"===t.dataFormat;this.userCode=` void main() { ivec4 coords = getOutputCoords(); int wR = coords.x; int wC = coords.y; int d1 = coords.z; int d2 = coords.w; // Convolve x(?, ?, d1) with dy(:, :, d2) to get dw(wR, wC, d1, d2). // ? = to be determined. : = across all values in that axis. 
float dotProd = 0.0; for (int b = 0; b < ${t.batchSize}; b++) { for (int yR = 0; yR < ${t.outHeight}; yR++) { int xR = wR + yR * ${r} - ${n}; if (xR < 0 || xR >= ${t.inHeight}) { continue; } for (int yC = 0; yC < ${t.outWidth}; yC++) { int xC = wC + yC * ${a} - ${s}; if (xC < 0 || xC >= ${t.inWidth}) { continue; } ${i?`float dyValue = getDy(b, yR, yC, d2); float xValue = getX(b, xR, xC, d1); dotProd += (xValue * dyValue);`:`float dyValue = getDy(b, d2, yR, yC); float xValue = getX(b, d1, xR, xC); dotProd += (xValue * dyValue);`} } } } setOutput(dotProd); } `}};let Conv2DDerInputProgram=class Conv2DDerInputProgram{constructor(t){this.variableNames=["dy","W"],this.outputShape=t.inShape;const r=t.filterHeight,a=t.filterWidth,n=t.strideHeight,s=t.strideWidth,i="channelsLast"===t.dataFormat,o=r-1-t.padInfo.top,l=a-1-t.padInfo.left;this.userCode=` const ivec2 pads = ivec2(${o}, ${l}); void main() { ivec4 coords = getOutputCoords(); int batch = coords[0]; int d1 = coords[${i?3:1}]; ivec2 dyCorner = ivec2(coords[${i?1:2}], coords[${i?2:3}]) - pads; int dyRCorner = dyCorner.x; int dyCCorner = dyCorner.y; // Convolve dy(?, ?, d2) with w(:, :, d1, d2) to compute dx(xR, xC, d1). // ? = to be determined. : = across all values in that axis. 
float dotProd = 0.0; for (int wR = 0; wR < ${r}; wR++) { float dyR = float(dyRCorner + wR) / ${n}.0; if (dyR < 0.0 || dyR >= ${t.outHeight}.0 || fract(dyR) > 0.0) { continue; } int idyR = int(dyR); int wRPerm = ${r} - 1 - wR; for (int wC = 0; wC < ${a}; wC++) { float dyC = float(dyCCorner + wC) / ${s}.0; if (dyC < 0.0 || dyC >= ${t.outWidth}.0 || fract(dyC) > 0.0) { continue; } int idyC = int(dyC); int wCPerm = ${a} - 1 - wC; for (int d2 = 0; d2 < ${t.outChannels}; d2++) { if (${i}) { float xValue = getDy(batch, idyR, idyC, d2); float wValue = getW(wRPerm, wCPerm, d1, d2); dotProd += xValue * wValue; } else { float xValue = getDy(batch, d2, idyR, idyC); float wValue = getW(wRPerm, wCPerm, d1, d2); dotProd += xValue * wValue; } } } } setOutput(dotProd); } `}};let Conv3DDerFilterProgram=class Conv3DDerFilterProgram{constructor(t){this.variableNames=["x","dy"],this.outputShape=t.filterShape;const r=t.strideDepth,a=t.strideHeight,n=t.strideWidth,s=t.padInfo.front,i=t.padInfo.top,o=t.padInfo.left;this.userCode=` void main() { ivec5 coords = getOutputCoords(); int wF = coords.x; int wR = coords.y; int wC = coords.z; int d1 = coords.w; int d2 = coords.u; float dotProd = 0.0; for (int b = 0; b < ${t.batchSize}; b++) { for (int yF = 0; yF < ${t.outDepth}; yF++) { int xF = wF + yF * ${r} - ${s}; if (xF < 0 || xF >= ${t.inDepth}) { continue; } for (int yR = 0; yR < ${t.outHeight}; yR++) { int xR = wR + yR * ${a} - ${i}; if (xR < 0 || xR >= ${t.inHeight}) { continue; } for (int yC = 0; yC < ${t.outWidth}; yC++) { int xC = wC + yC * ${n} - ${o}; if (xC < 0 || xC >= ${t.inWidth}) { continue; } float dyValue = getDy(b, yF, yR, yC, d2); float xValue = getX(b, xF, xR, xC, d1); dotProd += (xValue * dyValue); } } } } setOutput(dotProd); } `}};let Conv3DDerInputProgram=class Conv3DDerInputProgram{constructor(t){this.variableNames=["dy","W"],this.outputShape=t.inShape;const 
r=t.filterDepth,a=t.filterHeight,n=t.filterWidth,s=t.strideDepth,i=t.strideHeight,o=t.strideWidth,l=r-1-t.padInfo.front,u=a-1-t.padInfo.top,p=n-1-t.padInfo.left;this.userCode=` const ivec3 pads = ivec3(${l}, ${u}, ${p}); void main() { ivec5 coords = getOutputCoords(); int batch = coords.x; int d1 = coords.u; ivec3 dyCorner = ivec3(coords.y, coords.z, coords.w) - pads; int dyFCorner = dyCorner.x; int dyRCorner = dyCorner.y; int dyCCorner = dyCorner.z; float dotProd = 0.0; for (int wF = 0; wF < ${r}; wF++) { float dyF = float(dyFCorner + wF) / ${s}.0; if (dyF < 0.0 || dyF >= ${t.outDepth}.0 || fract(dyF) > 0.0) { continue; } int idyF = int(dyF); int wFPerm = ${r} - 1 - wF; for (int wR = 0; wR < ${a}; wR++) { float dyR = float(dyRCorner + wR) / ${i}.0; if (dyR < 0.0 || dyR >= ${t.outHeight}.0 || fract(dyR) > 0.0) { continue; } int idyR = int(dyR); int wRPerm = ${a} - 1 - wR; for (int wC = 0; wC < ${n}; wC++) { float dyC = float(dyCCorner + wC) / ${o}.0; if (dyC < 0.0 || dyC >= ${t.outWidth}.0 || fract(dyC) > 0.0) { continue; } int idyC = int(dyC); int wCPerm = ${n} - 1 - wC; for (int d2 = 0; d2 < ${t.outChannels}; d2++) { float xValue = getDy(batch, idyF, idyR, idyC, d2); float wValue = getW(wFPerm, wRPerm, wCPerm, d1, d2); dotProd += xValue * wValue; } } } } setOutput(dotProd); } `}};function kernels_Conv2DBackpropFilter_conv2DBackpropFilter(t){let{inputs:r,backend:a,attrs:n}=t,{x:s,dy:i}=r,{strides:o,pad:l,dataFormat:u,dimRoundingMode:p,filterShape:m}=n,y=convertConv2DDataFormat(u),_=new Conv2DDerFilterProgram(computeConv2DInfo(s.shape,m,o,1,l,p,!1,y));return a.runWebGLProgram(_,[s,i],"float32")}let Conv2DDerInputPackedProgram=class Conv2DDerInputPackedProgram{constructor(t){this.variableNames=["dy","W"],this.packedInputs=!0,this.packedOutput=!0,this.customUniforms=[{name:"strides",type:"vec2"}],this.outputShape=t.inShape,this.enableShapeUniforms=useShapeUniforms(this.outputShape.length);const 
r=t.filterHeight,a=t.filterWidth,n=r-1-t.padInfo.top,s=a-1-t.padInfo.left;this.userCode=` const ivec2 pads = ivec2(${n}, ${s}); void main() { ivec4 coords = getOutputCoords(); int batch = coords[0]; int d1 = coords[3]; ivec2 dyCorner = ivec2(coords[1], coords[2]) - pads; int dyRCorner = dyCorner.x; int dyCCorner = dyCorner.y; vec4 result = vec4(0.); for (int wR = 0; wR < ${r}; wR++) { float dyR = float(dyRCorner + wR) / strides[0]; if (dyR < 0.0 || dyR >= ${t.outHeight}.0 || fract(dyR) > 0.0) { continue; } int idyR = int(dyR); int wRPerm = ${r} - 1 - wR; for (int wC = 0; wC < ${a}; wC++) { int wCPerm = ${a} - 1 - wC; float dyC = float(dyCCorner + wC) / strides[1]; bool idyCVal = (dyC >= 0.0) && (dyC < ${t.outWidth}.0) && (fract(dyC) == 0.0); int idyC = int(dyC); float dyC2 = float(dyCCorner + wC + 1) / strides[1]; bool idyCVal2 = (dyC2 >= 0.0) && (dyC2 < ${t.outWidth}.0) && (fract(dyC2) == 0.0); int idyC2 = int(dyC2); if (idyCVal && idyCVal2) { for (int d2 = 0; d2 < ${t.outChannels}; d2 += 2) { vec4 wValue = getW(wRPerm, wCPerm, d1, d2); vec4 dySample = getDy(batch, idyR, idyC, d2); vec4 dySample2 = (idyC / 2 == idyC2 / 2) ? dySample : getDy(batch, idyR, idyC2, d2); vec2 dyValue = mod(float(idyC), 2.) == 0. ? dySample.xy : dySample.zw; result.xy += vec2(dot(dyValue, wValue.xy), dot(dyValue, wValue.zw)); dyValue = mod(float(idyC2), 2.) == 0. ? dySample2.xy : dySample2.zw; result.zw += vec2(dot(dyValue, wValue.xy), dot(dyValue, wValue.zw)); } } else if (idyCVal) { for (int d2 = 0; d2 < ${t.outChannels}; d2 += 2) { vec4 wValue = getW(wRPerm, wCPerm, d1, d2); vec4 dySample = getDy(batch, idyR, idyC, d2); vec2 dyValue = mod(float(idyC), 2.) == 0. ? dySample.xy : dySample.zw; result.xy += vec2(dot(dyValue, wValue.xy), dot(dyValue, wValue.zw)); } } else if (idyCVal2) { for (int d2 = 0; d2 < ${t.outChannels}; d2 += 2) { vec4 wValue = getW(wRPerm, wCPerm, d1, d2); vec4 dySample = getDy(batch, idyR, idyC2, d2); vec2 dyValue = mod(float(idyC2), 2.) == 0. ? 
dySample.xy : dySample.zw; result.zw += vec2(dot(dyValue, wValue.xy), dot(dyValue, wValue.zw)); } } } } setOutput(result); } `}};function kernels_Conv2DBackpropInput_conv2DBackpropInput(t){let{inputs:r,backend:a,attrs:n}=t,{dy:s,filter:i}=r,{inputShape:o,strides:l,pad:u,dataFormat:p,dimRoundingMode:m}=n,y=convertConv2DDataFormat(p),_=computeConv2DInfo(o,i.shape,l,1,u,m,!1,y);if(eV.getBool("WEBGL_PACK_CONV2DTRANSPOSE")&&"channelsLast"===y){let t=[[_.strideHeight,_.strideWidth]],r=new Conv2DDerInputPackedProgram(_);return a.runWebGLProgram(r,[s,i],"float32",t)}{let t=new Conv2DDerInputProgram(_);return a.runWebGLProgram(t,[s,i],"float32")}}function Conv3D_conv3D(t){let{inputs:r,backend:a,attrs:n}=t,{x:s,filter:i}=r,{strides:o,pad:l,dilations:u}=n,p=new Conv3DProgram(computeConv3DInfo(s.shape,i.shape,o,u,l));return a.runWebGLProgram(p,[s,i],"float32")}function Conv3DBackpropFilterV2_conv3DBackpropFilterV2(t){let{inputs:r,backend:a,attrs:n}=t,{x:s,dy:i}=r,{strides:o,pad:l,filterShape:u}=n,p=new Conv3DDerFilterProgram(computeConv3DInfo(s.shape,u,o,1,l));return a.runWebGLProgram(p,[s,i],"float32")}function Conv3DBackpropInputV2_conv3DBackpropInput(t){let{inputs:r,backend:a,attrs:n}=t,{dy:s,filter:i}=r,{pad:o,strides:l,inputShape:u}=n,p=new Conv3DDerInputProgram(computeConv3DInfo(u,i.shape,l,1,o));return a.runWebGLProgram(p,[s,i],"float32")}let hN=kernel_funcs_utils_unaryKernelFunc({opSnippet:hu+` return cos(x); `,packedOpSnippet:` vec4 result = cos(x); bvec4 isNaN = isnan(x); ${hn} return result; `}),hC=kernel_funcs_utils_unaryKernelFunc({opSnippet:` float e2x = exp(-x); return (e2x + 1.0 / e2x) / 2.0; `});let CropAndResizeProgram=class CropAndResizeProgram{constructor(t,r,a,n,s){this.variableNames=["Image","Boxes","BoxInd"],this.outputShape=[];const[i,o,l,u]=t,[p]=r,[m,y]=a;this.outputShape=[p,m,y,u];const[_,w]=[`${o-1}.0`,`${l-1}.0`],[I,C,E]=m>1?[`${(o-1)/(m-1)}`,"(y2-y1) * height_ratio",`y1*${_} + float(y)*(height_scale)`]:["0.0","0.0",`0.5 * (y1+y2) * 
${_}`],[A,$,F]=y>1?[`${(l-1)/(y-1)}`,"(x2-x1) * width_ratio",`x1*${w} + float(x)*(width_scale)`]:["0.0","0.0",`0.5 * (x1+x2) * ${w}`];this.userCode=` const float height_ratio = float(${I}); const float width_ratio = float(${A}); void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; int y = coords[1]; int x = coords[2]; int d = coords[3]; // get box vals float y1 = getBoxes(b,0); float x1 = getBoxes(b,1); float y2 = getBoxes(b,2); float x2 = getBoxes(b,3); // get image in batch index int bInd = round(getBoxInd(b)); if(bInd < 0 || bInd >= ${i}) { return; } float height_scale = ${C}; float width_scale = ${$}; float in_y = ${E}; if( in_y < 0.0 || in_y > ${_} ) { setOutput(float(${s})); return; } float in_x = ${F}; if( in_x < 0.0 || in_x > ${w} ) { setOutput(float(${s})); return; } vec2 sourceFracIndexCR = vec2(in_x,in_y); if(${+("bilinear"===n)} == 1) { // Compute the four integer indices. ivec2 sourceFloorCR = ivec2(sourceFracIndexCR); ivec2 sourceCeilCR = ivec2(ceil(sourceFracIndexCR)); float topLeft = getImage(b, sourceFloorCR.y, sourceFloorCR.x, d); float bottomLeft = getImage(b, sourceCeilCR.y, sourceFloorCR.x, d); float topRight = getImage(b, sourceFloorCR.y, sourceCeilCR.x, d); float bottomRight = getImage(b, sourceCeilCR.y, sourceCeilCR.x, d); vec2 fracCR = sourceFracIndexCR - vec2(sourceFloorCR); float top = topLeft + (topRight - topLeft) * fracCR.x; float bottom = bottomLeft + (bottomRight - bottomLeft) * fracCR.x; float newValue = top + (bottom - top) * fracCR.y; setOutput(newValue); } else { // Compute the coordinators of nearest neighbor point. 
ivec2 sourceNearestCR = ivec2(floor( sourceFracIndexCR + vec2(0.5,0.5))); float newValue = getImage(b, sourceNearestCR.y, sourceNearestCR.x, d); setOutput(newValue); } } `}};(j=ep||(ep={})).Prod="*",j.Sum="+";let CumProgram=class CumProgram{constructor(t,r,a,n){this.op=t,this.outputShape=r,this.variableNames=["x"],this.customUniforms=[{name:"index",type:"float"}];const s=this.outputShape.length,i=this.op===ep.Prod?"1.0":"0.0",o=a?i:`getX(${cum_gpu_getCoords(s,"coords",this.op)})`,l=this.outputShape[this.outputShape.length-1];let u="",p="";a?(u=n?`end != ${l-1}`:"end != 0",p=n?"end + 1":"end - 1"):(u=n?`end + pow2 < ${l}`:"end >= pow2",p=n?"end + pow2":"end - pow2"),this.userCode=` void main() { ${getCoordsDataType(s)} coords = getOutputCoords(); int end = ${getFinalCoord(s,"coords",this.op)}; float val = ${o}; int pow2 = int(pow(2.0, index)); if (${u}) { int idx = ${p}; ${getFinalCoord(s,"coords",this.op)} = idx; val ${this.op}= getX(${cum_gpu_getCoords(s,"coords",this.op)}); } setOutput(val); } `}};function cum_gpu_getCoords(t,r,a){if(1===t)return`${r}`;if(2===t)return`${r}.x, ${r}.y`;if(3===t)return`${r}.x, ${r}.y, ${r}.z`;if(4===t)return`${r}.x, ${r}.y, ${r}.z, ${r}.w`;throw Error(`Cumulative ${a} for rank ${t} is not yet supported`)}function getFinalCoord(t,r,a){if(1===t)return`${r}`;if(2===t)return`${r}.y`;if(3===t)return`${r}.z`;if(4===t)return`${r}.w`;throw Error(`Cumulative ${a} for rank ${t} is not yet supported`)}function cumImpl(t,r,a,n,s,i){let o=r.shape.length,l=getAxesPermutation([n],o),u=r;null!=l&&(u=kernels_Transpose_transpose({inputs:{x:r},backend:a,attrs:{perm:l}}));let p=getInnerMostAxes(1,o)[0];if(p!==o-1)throw Error(`WebGL cumprod shader expects an inner-most axis=${r.shape.length-1} but got axis=${n}`);let m=u.shape[p],y=kernels_Identity_identity({inputs:{x:u},backend:a});for(let r=0;r<=Math.ceil(Math.log2(m))-1;r++){let n=new 
CumProgram(t,u.shape,!1,i),s=[[r]],o=y;y=a.runWebGLProgram(n,[y],y.dtype,s),a.disposeIntermediateTensorInfo(o)}if(s){let r=new CumProgram(t,u.shape,s,i),n=y;y=a.runWebGLProgram(r,[y],y.dtype),a.disposeIntermediateTensorInfo(n)}if(null!=l){let t=kernels_Transpose_transpose({inputs:{x:y},backend:a,attrs:{perm:getUndoAxesPermutation(l)}});return a.disposeIntermediateTensorInfo(y),a.disposeIntermediateTensorInfo(u),t}return y}function kernels_Cumprod_cumprod(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{axis:i,exclusive:o,reverse:l}=n;return cumImpl(ep.Prod,s,a,i,o,l)}function kernels_Cumsum_cumsum(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{axis:i,exclusive:o,reverse:l}=n;return cumImpl(ep.Sum,s,a,i,o,l)}function kernels_DenseBincount_denseBincount(t){let{inputs:r,backend:a,attrs:n}=t,{x:s,weights:i}=r,{size:o,binaryOutput:l}=n;if(1===s.shape.length){let t=po(a.readSync(s.dataId),a.readSync(i.dataId),i.dtype,i.shape,o);return a.makeTensorInfo([o],i.dtype,t)}if(2===s.shape.length){let t=pl(a.bufferSync(s),a.bufferSync(i),o,l);return a.makeTensorInfo(t.shape,i.dtype,t.values)}throw Error(`Error in denseBincount: input must be at most rank 2, but got rank${s.shape.length}.`)}let DepthToSpaceProgram=class DepthToSpaceProgram{constructor(t,r,a){this.variableNames=["x"],this.outputShape=[],this.outputShape=t,this.blockSize=r,this.dataFormat=a,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; int h = ${this.getHeightCoordString()}; int w = ${this.getWidthCoordString()}; int d = ${this.getDepthCoordString()}; int in_h = h / ${r}; int offset_h = imod(h, ${r}); int in_w = w / ${r}; int offset_w = imod(w, ${r}); int offset_d = (offset_h * ${r} + offset_w) * ${this.getOutputDepthSize()}; int in_d = d + offset_d; float result = ${this.getInputSamplingString()}; setOutput(result); } 
`}getHeightCoordString(){return"NHWC"===this.dataFormat?"coords[1]":"coords[2]"}getWidthCoordString(){return"NHWC"===this.dataFormat?"coords[2]":"coords[3]"}getDepthCoordString(){return"NHWC"===this.dataFormat?"coords[3]":"coords[1]"}getOutputDepthSize(){return"NHWC"===this.dataFormat?this.outputShape[3]:this.outputShape[1]}getInputSamplingString(){return"NHWC"===this.dataFormat?"getX(b, in_h, in_w, in_d)":"getX(b, in_d, in_h, in_w)"}};function kernels_DepthToSpace_depthToSpace(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{blockSize:i,dataFormat:o}=n,l=s.shape[0],u="NHWC"===o?s.shape[1]:s.shape[2],p="NHWC"===o?s.shape[2]:s.shape[3],m="NHWC"===o?s.shape[3]:s.shape[1],y=u*i,_=p*i,w=m/(i*i),I=new DepthToSpaceProgram("NHWC"===o?[l,y,_,w]:[l,w,y,_],i,o);return a.runWebGLProgram(I,[s],s.dtype)}let DepthwiseConv2DProgram=class DepthwiseConv2DProgram{constructor(t,r=!1,a=null,n=!1,s=!1){this.variableNames=["x","W"],this.customUniforms=[{name:"pads",type:"ivec2"},{name:"strides",type:"ivec2"},{name:"dilations",type:"ivec2"},{name:"inDims",type:"ivec2"}],this.outputShape=t.outShape,this.enableShapeUniforms=useShapeUniforms(this.outputShape.length);const i=t.filterHeight,o=t.filterWidth,l=t.outChannels/t.inChannels;let u="",p="";a&&(u=n?`float activation(float a) { float b = getPreluActivationWeightsAtOutCoords(); ${a} }`:s?`float activation(float a) { float b = getLeakyreluAlphaAtOutCoords(); ${a} }`:` float activation(float x) { ${a} } `,p="result = activation(result);"),r&&this.variableNames.push("bias"),n&&this.variableNames.push("preluActivationWeights"),s&&this.variableNames.push("leakyreluAlpha"),this.userCode=` ${u} void main() { ivec4 coords = getOutputCoords(); int batch = coords.x; ivec2 xRCCorner = coords.yz * strides - pads; int d2 = coords.w; int d1 = d2 / ${l}; int q = d2 - d1 * ${l}; int xRCorner = xRCCorner.x; int xCCorner = xRCCorner.y; // Convolve x(?, ?, d1) with w(:, :, d1, q) to get y(yR, yC, d2). // ? = to be determined. 
: = across all values in that axis. float dotProd = 0.0; // TO DO(dsmilkov): Flatten the two for loops and vec4 the operations. for (int wR = 0; wR < ${i}; wR++) { int xR = xRCorner + wR * dilations[0]; if (xR < 0 || xR >= inDims[0]) { continue; } for (int wC = 0; wC < ${o}; wC++) { int xC = xCCorner + wC * dilations[1]; if (xC < 0 || xC >= inDims[1]) { continue; } float xVal = getX(batch, xR, xC, d1); float wVal = getW(wR, wC, d1, q); dotProd += xVal * wVal; } } float result = dotProd; ${r?"result += getBiasAtOutCoords();":""} ${p} setOutput(result); } `}};let DepthwiseConvPacked2DProgram=class DepthwiseConvPacked2DProgram{constructor(t,r=!1,a=null,n=!1,s=!1){this.variableNames=["x","W"],this.packedInputs=!0,this.packedOutput=!0,this.customUniforms=[{name:"pads",type:"ivec2"},{name:"strides",type:"ivec2"},{name:"dilations",type:"ivec2"},{name:"inDims",type:"ivec2"}],this.outputShape=t.outShape,this.enableShapeUniforms=useShapeUniforms(this.outputShape.length);const i=t.outChannels/t.inChannels,o=t.padInfo.left,l=t.strideWidth,u=t.dilationWidth,p=t.filterHeight,m=t.filterWidth;let y=` int xR; int xC; int xCOffset; vec4 wTexel; vec4 previous; vec4 final;`;for(let t=0;t=0 && xR < inDims[0]) { `;for(let t=0;t<(m+1)/2;t++){const r=2*t;if(y+=` xC = xCCorner + ${r*u}; `,1===l){if(r= 0 && xCOffset < inDims[1] && xTexelC${r}Ready == 0) { xTexelC${r} = getX(batch, xR, xCOffset, d1); // Need to manually clear unused channels in case // we're reading from recycled texture. if (xCOffset + 1 >= inDims[1]) { xTexelC${r}.zw = vec2(0.0); } xTexelC${r}Ready = 1; } `,1===u&&r>0?y+=` xC${r} = vec4(xTexelC${r-2}.zw, xTexelC${r}.xy); `:y+=` xCOffset = xC + 1 - 2; if (xCOffset >= 0 && xCOffset < inDims[1]) { previous = getX(batch, xR, xCOffset, d1); // Need to manually clear unused channels in case // we're reading from recycled texture. 
if (xCOffset + 1 >= inDims[1]) { previous.zw = vec2(0.0); } xC${r} = vec4(previous.zw, xTexelC${r}.xy); } else { xC${r} = vec4(0.0, 0.0, xTexelC${r}.xy); } `):y+=` if (xC >= 0 && xC < inDims[1] && xTexelC${r}Ready == 0) { xTexelC${r} = getX(batch, xR, xC, d1); if (xC + 1 >= inDims[1]) { xTexelC${r}.zw = vec2(0.0); } xTexelC${r}Ready = 1; } xC${r} = xTexelC${r}; `,r+1= 0 && xCOffset < inDims[1] && xTexelC${r+1}Ready == 0) { xTexelC${r+1} = getX(batch, xR, xCOffset, d1); // Need to manually clear unused channels in case // we're reading from recycled texture. if (xCOffset + 1 >= inDims[1]) { xTexelC${r+1}.zw = vec2(0.0); } xTexelC${r+1}Ready = 1; } `,u>1?y+=` xCOffset -= 2; if (xCOffset >= 0 && xCOffset < inDims[1]) { previous = getX(batch, xR, xCOffset, d1); xC${r+1} = vec4(previous.zw, xTexelC${r+1}.xy); } else { xC${r+1} = vec4(0.0, 0.0, xTexelC${r+1}.xy); } `:y+=` xC${r+1} = vec4(xTexelC${r}.zw, xTexelC${r+1}.xy); `):1===t?y+=` xC${r+1} = xTexelC${r}; `:y+=` xCOffset = xC + ${t}; if (xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${r+1}Ready == 0) { xTexelC${r+1} = getX(batch, xR, xCOffset, d1); if (xCOffset + 1 >= inDims[1]) { xTexelC${r+1}.zw = vec2(0.0); } xTexelC${r+1}Ready = 1; } xC${r+1} = xTexelC${r+1}; `}}else r= 0 && xCOffset < inDims[1] && xTexelC${r}Ready == 0) { xTexelC${r} = getX(batch, xR, xCOffset, d1); // Need to manually clear unused channels in case // we're reading from recycled texture. if (xCOffset + 1 >= inDims[1]) { xTexelC${r}.zw = vec2(0.0); } xTexelC${r}Ready = 1; } if(xC + 1 >= 0 && xC + 1 < inDims[1] && xTexelC${r+1}Ready == 0) { xTexelC${r+1} = getX(batch, xR, xC + 1, d1); // Need to manually clear unused channels in case // we're reading from recycled texture. 
if (xC + 2 >= inDims[1]) { xTexelC${r+1}.zw = vec2(0.0); } xTexelC${r+1}Ready = 1; } xC${r} = vec4(xTexelC${r}.zw, xTexelC${r+1}.zw); `,r+1= 0 && xCOffset < inDims[1]) { final = getX(batch, xR, xCOffset, d1); } xC${r+1} = vec4(xTexelC${r+1}.xy, final.xy); `)):(y+=` if(xC >= 0 && xC < inDims[1] && xTexelC${r}Ready == 0) { xTexelC${r} = getX(batch, xR, xC, d1); if (xC + 1 >= inDims[1]) { xTexelC${r}.zw = vec2(0.0); } xTexelC${r}Ready = 1; } xCOffset = xC + strides[1]; if(xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${r+1}Ready == 0) { xTexelC${r+1} = getX(batch, xR, xCOffset, d1); if (xCOffset + 1 >= inDims[1]) { xTexelC${r+1}.zw = vec2(0.); } xTexelC${r+1}Ready = 1; } xC${r} = vec4( xTexelC${r}.xy, xTexelC${r+1}.xy); `,r+1`Error in depthwiseConv2d: Either strides or dilations must be 1. Got strides ${l} and dilations '${y}'`);let _=computeConv2DInfo(i.shape,o.shape,l,y,u,m,!0);r=eV.getBool("WEBGL_PACK_DEPTHWISECONV")&&_.strideWidth<=2&&_.outChannels/_.inChannels==1?new DepthwiseConvPacked2DProgram(_):new DepthwiseConv2DProgram(_);let w=[[_.padInfo.top,_.padInfo.left],[_.strideHeight,_.strideWidth],[_.dilationHeight,_.dilationWidth],[_.inHeight,_.inWidth]];return n.runWebGLProgram(r,[i,o],"float32",w)}let DepthwiseConv2DDerFilterProgram=class DepthwiseConv2DDerFilterProgram{constructor(t){this.variableNames=["x","dy"],this.outputShape=t.filterShape;const r=t.strideHeight,a=t.strideWidth,n=t.padInfo.top,s=t.padInfo.left,i=t.outChannels/t.inChannels;this.userCode=` void main() { ivec4 coords = getOutputCoords(); int wR = coords.x; int wC = coords.y; int d1 = coords.z; int dm = coords.w; int d2 = d1 * ${i} + dm; float dotProd = 0.0; // TO DO: Vec4 over the batch size for (int b = 0; b < ${t.batchSize}; b++) { for (int yR = 0; yR < ${t.outHeight}; yR++) { int xR = wR + yR * ${r} - ${n}; if (xR < 0 || xR >= ${t.inHeight}) { continue; } for (int yC = 0; yC < ${t.outWidth}; yC++) { int xC = wC + yC * ${a} - ${s}; if (xC < 0 || xC >= ${t.inWidth}) { continue; } float 
dyValue = getDy(b, yR, yC, d2); float xValue = getX(b, xR, xC, d1); dotProd += (xValue * dyValue); } } } setOutput(dotProd); } `}};let DepthwiseConv2DDerInputProgram=class DepthwiseConv2DDerInputProgram{constructor(t){this.variableNames=["dy","W"],this.outputShape=t.inShape;const r=t.filterHeight,a=t.filterWidth,n=t.strideHeight,s=t.strideWidth,i=r-1-t.padInfo.top,o=a-1-t.padInfo.left,l=t.outChannels/t.inChannels;this.userCode=` const ivec2 pads = ivec2(${i}, ${o}); void main() { ivec4 coords = getOutputCoords(); int batch = coords[0]; int d1 = coords[3]; ivec2 dyCorner = coords.yz - pads; int dyRCorner = dyCorner.x; int dyCCorner = dyCorner.y; float dotProd = 0.0; for (int wR = 0; wR < ${r}; wR++) { float dyR = float(dyRCorner + wR) / ${n}.0; if (dyR < 0.0 || dyR >= ${t.outHeight}.0 || fract(dyR) > 0.0) { continue; } int idyR = int(dyR); int wRPerm = ${r} - 1 - wR; for (int wC = 0; wC < ${a}; wC++) { float dyC = float(dyCCorner + wC) / ${s}.0; if (dyC < 0.0 || dyC >= ${t.outWidth}.0 || fract(dyC) > 0.0) { continue; } int idyC = int(dyC); int wCPerm = ${a} - 1 - wC; // TO DO: Vec4 over the channelMul for (int dm = 0; dm < ${l}; dm++) { int d2 = d1 * ${l} + dm; float xValue = getDy(batch, idyR, idyC, d2); float wValue = getW(wRPerm, wCPerm, d1, dm); dotProd += xValue * wValue; } } } setOutput(dotProd); } `}};function kernels_DepthwiseConv2dNativeBackpropFilter_depthwiseConv2dNativeBackpropFilter(t){let{inputs:r,backend:a,attrs:n}=t,{x:s,dy:i}=r,{strides:o,dilations:l,pad:u,dimRoundingMode:p,filterShape:m}=n,y=new DepthwiseConv2DDerFilterProgram(computeConv2DInfo(s.shape,m,o,l,u,p,!0));return a.runWebGLProgram(y,[s,i],"float32")}function kernels_DepthwiseConv2dNativeBackpropInput_depthwiseConv2dNativeBackpropInput(t){let{inputs:r,backend:a,attrs:n}=t,{dy:s,filter:i}=r,{strides:o,dilations:l,pad:u,dimRoundingMode:p,inputShape:m}=n,y=new DepthwiseConv2DDerInputProgram(computeConv2DInfo(m,i.shape,o,l,u,p,!0));return a.runWebGLProgram(y,[s,i],"float32")}let 
DiagProgram=class DiagProgram{constructor(t){this.variableNames=["X"],this.outputShape=[t,t],this.userCode=` void main() { ivec2 coords = getOutputCoords(); float val = coords[0] == coords[1] ? getX(coords[0]) : 0.0; setOutput(val); } `}};function kernels_Diag_diag(t){let{inputs:r,backend:a}=t,{x:n}=r,s=[...n.shape,...n.shape],i=sizeFromShape(n.shape),o=kernels_Reshape_reshape({inputs:{x:n},backend:a,attrs:{shape:[i]}}),l=new DiagProgram(i),u=a.runWebGLProgram(l,[o],o.dtype),p=kernels_Reshape_reshape({inputs:{x:u},backend:a,attrs:{shape:s}});return a.disposeIntermediateTensorInfo(o),a.disposeIntermediateTensorInfo(u),p}let Dilation2DProgram=class Dilation2DProgram{constructor(t){this.variableNames=["x","W"],this.outputShape=t.outShape;const{inHeight:r,inWidth:a,padInfo:n,strideHeight:s,strideWidth:i,filterHeight:o,filterWidth:l,dilationHeight:u,dilationWidth:p}=t,{top:m,left:y}=n;this.userCode=` const ivec2 strides = ivec2(${s}, ${i}); const ivec2 pads = ivec2(${m}, ${y}); const float neg_infinity = -3.4e38; void main() { ivec4 coords = getOutputCoords(); int batch = coords.x; int d1 = coords.w; ivec2 outTopLeftCorner = coords.yz * strides - pads; int hBeg = outTopLeftCorner.x; int wBeg = outTopLeftCorner.y; float curVal = neg_infinity; for (int h = 0; h < ${o}; h++) { int hIn = hBeg + h * ${u}; if (hIn >= 0 && hIn < ${r}) { for (int w = 0; w < ${l}; w++) { int wIn = wBeg + w * ${p}; if (wIn >= 0 && wIn < ${a}) { float xVal = getX(batch, hIn, wIn, d1); float wVal = getW(h, w, d1); float val = xVal + wVal; if (val > curVal) { curVal = val; } } } } } float result = curVal; setOutput(result); } `}};function dilation2D(t){let r,{inputs:a,backend:n,attrs:s}=t,{x:i,filter:o}=a,{strides:l,pad:u,dilations:p}=s,m=computeDilation2DInfo(i.shape,o.shape,l,u,"NHWC",p),y=new Dilation2DProgram(m),_=kernels_Reshape_reshape({inputs:{x:r=n.runWebGLProgram(y,[i,o],"float32")},backend:n,attrs:{shape:m.outShape}});return n.disposeIntermediateTensorInfo(r),_}function 
kernels_Einsum_einsum(t){let{inputs:r,backend:a,attrs:n}=t,{equation:s}=n,{allDims:i,summedDims:o,idDims:l}=decodeEinsumEquation(s,r.length);checkEinsumDimSizes(i.length,l,r);let{path:u,steps:p}=getEinsumComputePath(o,l),m=p.length,y=null,_=i.length,w=[];for(let t=0;t=0&&(y=kernels_Sum_sum({inputs:{x:y},backend:a,attrs:{axis:u[t]-(i.length-_),keepDims:!1}}),w.push(y)),_--)}for(let t of w)t!==y&&a.disposeIntermediateTensorInfo(t);return y}let hE=kernel_funcs_utils_unaryKernelFunc({opSnippet:"return (x >= 0.0) ? x : (exp(x) - 1.0);",packedOpSnippet:` vec4 result; result.r = (x.r >= 0.0) ? x.r : (exp(x.r) - 1.0); result.g = (x.g >= 0.0) ? x.g : (exp(x.g) - 1.0); result.b = (x.b >= 0.0) ? x.b : (exp(x.b) - 1.0); result.a = (x.a >= 0.0) ? x.a : (exp(x.a) - 1.0); return result; `}),hA=` vec4 bGTEZero = vec4(greaterThanEqual(b, vec4(0.))); return (bGTEZero * a) + ((vec4(1.0) - bGTEZero) * (a * (b + vec4(1.0)))); `,h$=kernel_funcs_utils_binaryKernelFunc({opSnippet:"return float(a == b);",packedOpSnippet:` return vec4(equal(a, b)); `,dtype:"bool",cpuKernelImpl:pd}),hR=kernel_funcs_utils_unaryKernelFunc({opSnippet:` // Error function is calculated approximately with elementary function. // See "Handbook of Mathematical Functions with Formulas, // Graphs, and Mathematical Tables", Abramowitz and Stegun. float p = ${oS}; float a1 = ${ow}; float a2 = ${oI}; float a3 = ${oN}; float a4 = ${oC}; float a5 = ${oE}; float sign = sign(x); x = abs(x); float t = 1.0 / (1.0 + p * x); return sign * (1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*exp(-x*x)); `}),hF=kernel_funcs_utils_unaryKernelFunc({opSnippet:hu+` return exp(x); `,packedOpSnippet:` vec4 result = exp(x); bvec4 isNaN = isnan(x); result.r = isNaN.r ? x.r : result.r; result.g = isNaN.g ? x.g : result.g; result.b = isNaN.b ? x.b : result.b; result.a = isNaN.a ? 
x.a : result.a; return result; `,cpuKernelImpl:pm,dtype:"float32"});function kernels_ExpandDims_expandDims(t){let{inputs:r,attrs:a,backend:n}=t,{dim:s}=a,{input:i}=r,o=i.shape.length,l=i.shape.slice(),u=s;return s<0&&(assert(-(o+1)<=s,()=>`Axis must be in the interval [${-(o+1)}, ${o}]`),u=o+s+1),l.splice(u,0,1),kernels_Reshape_reshape({inputs:{x:i},backend:n,attrs:{shape:l}})}let hD="return exp(x) - 1.0;",hP=kernel_funcs_utils_unaryKernelFunc({opSnippet:hD,packedOpSnippet:hD,cpuKernelImpl:pf});let FFTProgram=class FFTProgram{constructor(t,r,a){let n;this.variableNames=["real","imag"];const s=r[1];this.outputShape=r;const i=a?`2.0 * ${Math.PI}`:`-2.0 * ${Math.PI}`,o=a?`${s}.0`:"1.0";if("real"===t)n="return real * expR - imag * expI;";else if("imag"===t)n="return real * expI + imag * expR;";else throw Error(`FFT component must be either "real" or "imag", got ${t}.`);this.userCode=` const float exponentMultiplier = ${i}; float unaryOpComplex(float real, float expR, float imag, float expI) { ${n} } float mulMatDFT(int batch, int index) { float indexRatio = float(index) / float(${s}); float exponentMultiplierTimesIndexRatio = exponentMultiplier * indexRatio; float result = 0.0; for (int i = 0; i < ${s}; i++) { // x = (-2|2 * PI / N) * index * i; float x = exponentMultiplierTimesIndexRatio * float(i); float expR = cos(x); float expI = sin(x); float real = getReal(batch, i); float imag = getImag(batch, i); result += unaryOpComplex(real, expR, imag, expI) / ${o}; } return result; } void main() { ivec2 coords = getOutputCoords(); setOutput(mulMatDFT(coords[0], coords[1])); } `}};function FFT_impl_fftImpl(t,r,a){let n=a.texData.get(t.dataId),s=sizeFromShape(t.shape),i=t.shape[t.shape.length-1],o=kernels_Reshape_reshape({inputs:{x:t},backend:a,attrs:{shape:[s/i,i]}}),l=o.shape,u=new FFTProgram("real",l,r),p=new 
FFTProgram("imag",l,r),m=[{dataId:n.complexTensorInfos.real.dataId,dtype:n.complexTensorInfos.real.dtype,shape:l},{dataId:n.complexTensorInfos.imag.dataId,dtype:n.complexTensorInfos.imag.dtype,shape:l}],y=a.runWebGLProgram(u,m,"float32"),_=a.runWebGLProgram(p,m,"float32"),w=kernels_Complex_complex({inputs:{real:y,imag:_},backend:a});a.disposeIntermediateTensorInfo(y),a.disposeIntermediateTensorInfo(_);let I=kernels_Reshape_reshape({inputs:{x:w},backend:a,attrs:{shape:t.shape}});return a.disposeIntermediateTensorInfo(o),a.disposeIntermediateTensorInfo(w),I}function kernels_FFT_fft(t){let{inputs:r,backend:a}=t,{input:n}=r;return FFT_impl_fftImpl(n,!1,a)}let FillProgram=class FillProgram{constructor(t,r){this.outputShape=[],this.customUniforms=[{name:"value",type:"float"}],this.variableNames=["x"],this.outputShape=t,this.userCode=` void main() { // Input can be obtained from uniform value. setOutput(value); } `}};function kernels_Fill_fill(t){let{backend:r,attrs:a}=t,{shape:n,value:s}=a,{dtype:i}=a;if("string"===(i=i||inferDtype(s))){let t=getArrayFromDType(i,sizeFromShape(n));return t.fill(s),r.makeTensorInfo(n,i,t)}{let t=new FillProgram(n,s),a=[[s]];return r.runWebGLProgram(t,[],i,a)}}let FlipLeftRightProgram=class FlipLeftRightProgram{constructor(t){this.variableNames=["Image"],this.outputShape=[];const r=t[2];this.outputShape=t,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int x = coords[2]; int coordX = ${r} - x - 1; float outputValue; if(coordX >= 0 && coordX < ${r}) { outputValue = getImage(coords[0], coords[1], coordX, coords[3]); } else { outputValue = getImage(coords[0], coords[1], coords[2], coords[3]); } setOutput(outputValue); } `}};let hO="return floor(x);",hM=kernel_funcs_utils_unaryKernelFunc({opSnippet:hO,packedOpSnippet:hO,cpuKernelImpl:pg}),hL=kernel_funcs_utils_binaryKernelFunc({opSnippet:` float s = sign(a) * sign(b); int ia = round(a); int ib = round(b); if (ib != 0) { // Windows (D3D) wants guaranteed non-zero int division at 
compile-time. return float(idiv(ia, ib, s)); } else { return NAN; } `,packedOpSnippet:` ivec4 ia = round(a); ivec4 ib = round(b); bvec4 cond = notEqual(ib, ivec4(0)); ivec4 result = ivec4(0); vec4 s = sign(a) * sign(b); // Windows (D3D) wants guaranteed non-zero int division at compile-time. if (cond[0]) { result[0] = idiv(ia[0], ib[0], s[0]); } if (cond[1]) { result[1] = idiv(ia[1], ib[1], s[1]); } if (cond[2]) { result[2] = idiv(ia[2], ib[2], s[2]); } if (cond[3]) { result[3] = idiv(ia[3], ib[3], s[3]); } return vec4(result); `,dtype:"int32"});let FromPixelsProgram=class FromPixelsProgram{constructor(t){this.variableNames=["A"];const r=getGlslDifferences(),[a,n]=t;this.outputShape=t,this.userCode=` void main() { ivec3 coords = getOutputCoords(); int texR = coords[0]; int texC = coords[1]; int depth = coords[2]; vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${n}.0, ${a}.0); vec4 values = ${r.texture2D}(A, uv); float value; if (depth == 0) { value = values.r; } else if (depth == 1) { value = values.g; } else if (depth == 2) { value = values.b; } else if (depth == 3) { value = values.a; } setOutput(floor(value * 255.0 + 0.5)); } `}};let FromPixelsPackedProgram=class FromPixelsPackedProgram{constructor(t){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0;const r=getGlslDifferences(),[a,n]=t;this.outputShape=t,this.userCode=` void main() { ivec3 coords = getOutputCoords(); int texR = coords[0]; int texC = coords[1]; int depth = coords[2]; vec4 result = vec4(0.); for(int row=0; row<=1; row++) { for(int col=0; col<=1; col++) { texC = coords[1] + row; depth = coords[2] + col; vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${n}.0, ${a}.0); vec4 values = ${r.texture2D}(A, uv); float value; if (depth == 0) { value = values.r; } else if (depth == 1) { value = values.g; } else if (depth == 2) { value = values.b; } else if (depth == 3) { value = values.a; } result[row * 2 + col] = floor(value * 255.0 + 0.5); } } ${r.output} = result; } `}};let 
hz=eV.getBool("CANVAS2D_WILL_READ_FREQUENTLY_FOR_GPU");function FromPixels_fromPixels(t){let{inputs:r,backend:a,attrs:n}=t,{pixels:s}=r,{numChannels:i}=n,o="u">typeof HTMLVideoElement&&s instanceof HTMLVideoElement,l="u">typeof HTMLImageElement&&s instanceof HTMLImageElement,[u,p]=o?[s.videoWidth,s.videoHeight]:[s.width,s.height],y=[p,u],_=[p,u,i];if(l||o){let t=eV.getBool("CANVAS2D_WILL_READ_FREQUENTLY_FOR_GPU");(null==m||t!==hz)&&(hz=t,m=document.createElement("canvas").getContext("2d",{willReadFrequently:hz})),m.canvas.width=u,m.canvas.height=p,m.drawImage(s,0,0,u,p),s=m.canvas}let w=a.makeTensorInfo(y,"int32");a.texData.get(w.dataId).usage=el.PIXELS,a.gpgpu.uploadPixelDataToTexture(a.getTexture(w.dataId),s);let I=eV.getBool("WEBGL_PACK")?new FromPixelsPackedProgram(_):new FromPixelsProgram(_),C=a.runWebGLProgram(I,[w],"int32");return a.disposeData(w.dataId),C}function fusedConv2d(t){let r,{inputs:a,backend:n,attrs:s}=t,{x:i,filter:o,bias:l,preluActivationWeights:u}=a,{strides:p,pad:m,dataFormat:y,dilations:_,dimRoundingMode:w,activation:I,leakyreluAlpha:C}=s,E=convertConv2DDataFormat(y),A=computeConv2DInfo(i.shape,o.shape,p,_,m,w,!1,E),$=[],F=null!=l,D=null!=u,P="leakyrelu"===I,prepareInputs=()=>{let t=[i,o],alignInputWithDataFormat=(t,r)=>{if("NCHW"===r&&1===t.shape.length&&1!==t.shape[0]){let r=kernels_Reshape_reshape({inputs:{x:t},backend:n,attrs:{shape:[t.shape[0],1,1]}});return $.push(r),r}return t};if(F&&t.push(alignInputWithDataFormat(l,y)),D&&t.push(alignInputWithDataFormat(u,y)),P){let r=n.makeTensorInfo([],"float32",createScalarValue(C,"float32"));t.push(r),$.push(r)}return t};if(1===A.filterHeight&&1===A.filterWidth&&1===A.dilationHeight&&1===A.dilationWidth&&1===A.strideHeight&&1===A.strideWidth&&("SAME"===A.padInfo.type||"VALID"===A.padInfo.type))r=conv2dByMatMul({x:i,filter:o,convInfo:A,backend:n,bias:l,activation:I,preluActivationWeights:u,leakyreluAlpha:C});else if(A.strideWidth<=2&&"channelsLast"===E&&eV.getBool("WEBGL_EXP_CONV")){let t=new 
Conv2DPackedProgram(A,F,I?mapActivationToShaderProgram(I,!0):null,D,P),a=[[A.padInfo.top,A.padInfo.left],[A.strideHeight,A.strideWidth],[A.dilationHeight,A.dilationWidth],[A.inHeight,A.inWidth]],s=prepareInputs();r=n.runWebGLProgram(t,s,"float32",a)}else if(eV.getBool("WEBGL_CONV_IM2COL"))r=conv2dWithIm2Row({x:i,filter:o,convInfo:A,backend:n,bias:l,activation:I,preluActivationWeights:u,leakyreluAlpha:C});else{let t=new Conv2DProgram(A,F,I?mapActivationToShaderProgram(I,!1):null,D,P),a=prepareInputs();r=n.runWebGLProgram(t,a,"float32")}let L=kernels_Reshape_reshape({inputs:{x:r},backend:n,attrs:{shape:A.outShape}});return $.push(r),$.forEach(t=>n.disposeIntermediateTensorInfo(t)),L}function FusedDepthwiseConv2D_fusedDepthwiseConv2D(t){let r,{inputs:a,backend:n,attrs:s}=t,{x:i,filter:o,bias:l,preluActivationWeights:u}=a,{strides:p,pad:m,dilations:y,dimRoundingMode:_,activation:w,leakyreluAlpha:I}=s,C=[],E=y;null==E&&(E=[1,1]),assert(eitherStridesOrDilationsAreOne(p,E),()=>`Error in depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${p} and dilations '${E}'`);let A=computeConv2DInfo(i.shape,o.shape,p,E,m,_,!0),$=eV.getBool("WEBGL_PACK_DEPTHWISECONV")&&A.strideWidth<=2&&A.outChannels/A.inChannels==1,F=w?mapActivationToShaderProgram(w,$):null,D=[i,o],P=null!=l,L=null!=u,z="leakyrelu"===w;if(P&&D.push(l),L&&D.push(u),z){let t=n.makeTensorInfo([],"float32",createScalarValue(I,"float32"));D.push(t),C.push(t)}r=$?new DepthwiseConvPacked2DProgram(A,P,F,L,z):new DepthwiseConv2DProgram(A,P,F,L,z);let B=[[A.padInfo.top,A.padInfo.left],[A.strideHeight,A.strideWidth],[A.dilationHeight,A.dilationWidth],[A.inHeight,A.inWidth]],G=n.runWebGLProgram(r,D,"float32",B);return C.forEach(t=>n.disposeIntermediateTensorInfo(t)),G}let GatherNDProgram=class GatherNDProgram{constructor(t,r,a,n){this.sliceDim=t,this.strides=r,this.paramsShape=n,this.variableNames=["x","indices"],this.outputShape=a;const s=getCoordsDataType(a.length);let i=` int index;`;for(let t=0;t= ${this.paramsShape[t]}; flattenIndex += index * ${this.strides[t]};`;this.userCode=` void main() { ${s} coords = getOutputCoords(); int flattenIndex = 0; bool out_of_bounds = false; ${i} setOutput(out_of_bounds ? 
0.0 : getX(flattenIndex, coords[1])); } `}};function GatherNd_gatherNd(t){let{inputs:r,backend:a}=t,{params:n,indices:s}=r,i=s.shape,o=i[i.length-1],l=sizeFromShape(n.shape),[u,p,m,y]=prepareAndValidate(n,s),_=kernels_Reshape_reshape({inputs:{x:s},backend:a,attrs:{shape:[p,o]}}),w=kernels_Reshape_reshape({inputs:{x:n},backend:a,attrs:{shape:[sizeFromShape(n.shape)/m,m]}});if(a.shouldExecuteOnCPU([n,s])||"string"===n.dtype){let t=py(a.readSync(s.dataId),a.bufferSync(n),n.dtype,p,o,m,y,n.shape,l);return a.makeTensorInfo(u,n.dtype,t.values)}let I=new GatherNDProgram(o,y,[p,m],n.shape),C=a.runWebGLProgram(I,[w,_],w.dtype),E=kernels_Reshape_reshape({inputs:{x:C},backend:a,attrs:{shape:u}});return a.disposeIntermediateTensorInfo(_),a.disposeIntermediateTensorInfo(w),a.disposeIntermediateTensorInfo(C),E}let GatherProgram=class GatherProgram{constructor(t,r){this.variableNames=["A","indices"],this.outputShape=r,this.rank=r.length;const a=getCoordsDataType(this.rank),n=gather_gpu_getSourceCoords(t,2);this.userCode=` void main() { ${a} resRC = getOutputCoords(); int index = int(getIndices(resRC.x, resRC.z)); float inBounds = (index >= 0) && (index < ${t[2]}) ? 
1.0 : 0.0; setOutput(inBounds * getA(${n})); } `}};function gather_gpu_getSourceCoords(t,r){let a=["resRC.x","resRC.y","resRC.z","resRC.w"],n=[];for(let r=0;r=0,()=>`GatherV2: the index value ${n} is not in [0, ${r-1}]`)}}let p=collectGatherOpShapeInfo(s,i,u,l),m=sizeFromShape(i.shape),y=[],_=kernels_Reshape_reshape({inputs:{x:s},backend:a,attrs:{shape:[p.batchSize,p.outerSize,p.dimSize,p.sliceSize]}}),w=kernels_Reshape_reshape({inputs:{x:i},backend:a,attrs:{shape:[p.batchSize,m/p.batchSize]}});y.push(_),y.push(w);let I=[p.batchSize,p.outerSize,m/p.batchSize,p.sliceSize];if(a.shouldExecuteOnCPU([s,i])||"string"===s.dtype){let t=a.bufferSync(w),r=px(a.bufferSync(_),t,I);return y.forEach(t=>a.disposeIntermediateTensorInfo(t)),a.makeTensorInfo(p.outputShape,r.dtype,r.values)}let C=new GatherProgram(_.shape,I),E=a.runWebGLProgram(C,[_,w],_.dtype);y.push(E);let A=kernels_Reshape_reshape({inputs:{x:E},backend:a,attrs:{shape:p.outputShape}});return y.forEach(t=>a.disposeIntermediateTensorInfo(t)),A}let hV=kernel_funcs_utils_binaryKernelFunc({opSnippet:"return float(a > b);",packedOpSnippet:` return vec4(greaterThan(a, b)); `,cpuKernelImpl:pv,dtype:"bool"}),hB=kernel_funcs_utils_binaryKernelFunc({opSnippet:"return float(a >= b);",packedOpSnippet:` return vec4(greaterThanEqual(a, b)); `,dtype:"bool",cpuKernelImpl:p_});function kernels_IFFT_ifft(t){let{inputs:r,backend:a}=t,{input:n}=r;return FFT_impl_fftImpl(n,!0,a)}let hW=kernel_funcs_utils_unaryKernelFunc({opSnippet:"return float(!isnan(x) && !isinf(x));",dtype:"bool"}),hU=kernel_funcs_utils_unaryKernelFunc({opSnippet:"return float(isinf(x));",dtype:"bool"}),hG=kernel_funcs_utils_unaryKernelFunc({opSnippet:"return float(isnan(x));",dtype:"bool"}),hj=kernel_funcs_utils_binaryKernelFunc({opSnippet:"return float(a < b);",packedOpSnippet:` return vec4(lessThan(a, b)); `,cpuKernelImpl:pT,dtype:"bool"}),hK=kernel_funcs_utils_binaryKernelFunc({opSnippet:"return float(a <= b);",packedOpSnippet:` return vec4(lessThanEqual(a, b)); 
`,cpuKernelImpl:pk,dtype:"bool"});function LinSpace_linSpace(t){let{backend:r,attrs:a}=t,{start:n,stop:s,num:i}=a,o=pS(n,s,i);return r.makeTensorInfo([o.length],"float32",o)}let hH=kernel_funcs_utils_unaryKernelFunc({opSnippet:hu+` return x < 0.0 ? 0./0. : log(x); `,packedOpSnippet:` vec4 result = log(x); bvec4 isNaN = isnan(x); result.r = isNaN.r ? x.r : (x.r < 0.0 ? 0./0. : result.r); result.g = isNaN.g ? x.g : (x.g < 0.0 ? 0./0. : result.g); result.b = isNaN.b ? x.b : (x.b < 0.0 ? 0./0. : result.b); result.a = isNaN.a ? x.a : (x.a < 0.0 ? 0./0. : result.a); return result; `,cpuKernelImpl:pw}),hq=kernel_funcs_utils_unaryKernelFunc({opSnippet:hu+` return log(1.0 + x); `}),hX=kernel_funcs_utils_binaryKernelFunc({opSnippet:"return float(a >= 1.0 && b >= 1.0);",packedOpSnippet:` return vec4( vec4(greaterThanEqual(a, vec4(1.0))) * vec4(greaterThanEqual(b, vec4(1.0)))); `,dtype:"bool"}),hY=kernel_funcs_utils_unaryKernelFunc({opSnippet:"return float(!(x >= 1.0));"}),hZ=kernel_funcs_utils_binaryKernelFunc({opSnippet:"return float(a >= 1.0 || b >= 1.0);",packedOpSnippet:` return min( vec4(greaterThanEqual(a, vec4(1.0))) + vec4(greaterThanEqual(b, vec4(1.0))), vec4(1.0)); `,dtype:"bool"});let LRNProgram=class LRNProgram{constructor(t,r,a,n,s){let i;this.variableNames=["x"],this.outputShape=[];const o=t[3]-1;this.outputShape=t;const l=`float(${a}) + float(${n}) * sum`;i=.5===s?`inversesqrt(${l})`:1===s?`1.0/(${l})`:`exp(log(${l}) * float(-${s}));`,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; int r = coords[1]; int c = coords[2]; int d = coords[3]; float x = getX(b, r, c, d); float sum = 0.0; for (int j = -${r}; j <= ${r}; j++) { int idx = d + j; if (idx >= 0 && idx <= ${o}) { float z = getX(b, r, c, idx); sum += z * z; } } float val = x * ${i}; setOutput(val); } `}};let LRNPackedProgram=class LRNPackedProgram{constructor(t,r,a,n,s){let i;this.variableNames=["x"],this.outputShape=[],this.packedInputs=!0,this.packedOutput=!0;const 
o=t[3]-1;this.outputShape=t;const l=`float(${a}) + float(${n}) * sum`;i=.5===s?`inversesqrt(${l})`:1===s?`1.0/(${l})`:`exp(log(${l}) * float(-${s}));`,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords.x; int r = coords.y; int c = coords.z; int d = coords.w; bool hasNextCol = d < ${this.outputShape[3]}; bool hasNextRow = c < ${this.outputShape[2]}; vec4 sum = vec4(0.); vec4 xFragAtOutputCoords = getX(b, r, c, d); vec4 xAtOutputCoords = vec4( getChannel(xFragAtOutputCoords, vec2(c, d)), hasNextCol ? getChannel(xFragAtOutputCoords, vec2(c, d + 1)) : 0.0, hasNextRow ? getChannel(xFragAtOutputCoords , vec2(c + 1, d)) : 0.0, (hasNextRow && hasNextCol) ? getChannel(xFragAtOutputCoords, vec2(c + 1, d + 1)) : 0.0 ); int firstChannel = d - ${r}; vec2 cache = vec2(0.); if(firstChannel >= 0){ vec4 firstChannelFrag = getX(b, r, c, firstChannel); cache.x = getChannel(firstChannelFrag, vec2(c, firstChannel)); if(hasNextRow){ cache.y = getChannel(firstChannelFrag, vec2(c + 1, firstChannel)); } } ivec2 depth = ivec2(d, d + 1); for (int j = - ${r}; j <= ${r}; j++) { ivec2 idx = depth + j; bvec2 aboveLowerBound = greaterThanEqual(idx, ivec2(0)); bvec2 belowUpperBound = lessThanEqual(idx, ivec2(${o})); bool depthInRange = aboveLowerBound.x && belowUpperBound.x; bool depthPlusOneInRange = aboveLowerBound.y && belowUpperBound.y; if(depthInRange || depthPlusOneInRange){ vec4 z = vec4(0.); vec4 xFragAtCurrentDepth; z.xz = cache.xy; if(depthPlusOneInRange && hasNextCol){ xFragAtCurrentDepth = idx.y != d ? 
getX(b, r, c, idx.y) : xFragAtOutputCoords; z.y = getChannel(xFragAtCurrentDepth, vec2(c, idx.y)); if(hasNextRow){ z.w = getChannel(xFragAtCurrentDepth, vec2(c + 1, idx.y)); } } cache.xy = z.yw; sum += z * z; } } vec4 result = xAtOutputCoords * ${i}; setOutput(result); } `}};let LRNGradProgram=class LRNGradProgram{constructor(t,r,a,n,s){this.variableNames=["inputImage","outputImage","dy"],this.outputShape=[],this.outputShape=t,this.depth=t[3],this.depthRadius=r,this.bias=a,this.alpha=n,this.beta=s,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; int r = coords[1]; int c = coords[2]; float result = 0.0; for (int d = 0; d < ${this.depth}; ++d) { int depthBegin = int(max(0.0, float(d - ${r}))); int depthEnd = int(min(float(${this.depth}), float(d + ${r} + 1))); const int MIN_DEPTH_BEGIN = 0; const int MAX_DEPTH_END = ${this.depth}; float norm = 0.0; for (int k = MIN_DEPTH_BEGIN; k < MAX_DEPTH_END; ++k) { if (k < depthBegin){ continue; } else if (k >= depthBegin && k < depthEnd) { norm += getInputImage(b, r, c, k) * getInputImage(b, r, c, k); } else { break; } } norm = float(${n}) * norm + float(${a}); for(int k = MIN_DEPTH_BEGIN; k < MAX_DEPTH_END; ++k){ if (k < depthBegin){ continue; } else if (k >= depthBegin && k < depthEnd){ float dyi = -2.0 * float(${n}) * float(${s}) * getInputImage(b, r, c, k) * getOutputImage(b, r, c, d) / norm; if (k == d) { dyi += pow(norm, -1.0 * ${s}); } if (k == coords[3]) { dyi *= getDy(b, r, c, d); result += dyi; } } else { break; } } } setOutput(result); } `}};function Max_impl_maxImpl(t,r,a,n){let s=sizeFromShape(r),i=sizeFromShape(t.shape),o=kernels_Reshape_reshape({inputs:{x:t},attrs:{shape:[i/s,s]},backend:n}),l=reduce(o,t.dtype,"max",n),u=kernels_Reshape_reshape({inputs:{x:l},attrs:{shape:a},backend:n});return n.disposeIntermediateTensorInfo(o),n.disposeIntermediateTensorInfo(l),u}function kernels_Max_max(t){let 
r,{inputs:a,backend:n,attrs:s}=t,{x:i}=a,{reductionIndices:o,keepDims:l}=s,u=i.shape.length,p=parseAxisParam(o,i.shape),m=p,y=getAxesPermutation(m,u),_=null!=y,w=n.shouldExecuteOnCPU([i]),I=i;if(_){if(w){let t=n.texData.get(I.dataId).values,r=Array(u);for(let t=0;t`Error in maxPool: Either strides or dilations must be 1. Got strides ${o} and dilations '1'`);let p=computePool2DInfo(s.shape,i,o,1,l,u);if(1===p.filterWidth&&1===p.filterHeight&&arraysEqual(p.inShape,p.outShape))return kernels_Identity_identity({inputs:{x:s},backend:a});let m=new Pool2DProgram(p,"max",!1);return a.runWebGLProgram(m,[s],s.dtype)}function MaxPool3D_maxPool3d(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{filterSize:i,strides:o,pad:l,dataFormat:u,dimRoundingMode:p}=n,m=new Pool3DProgram(computePool3DInfo(s.shape,i,o,[1,1,1],l,p,u),"max",!1);return a.runWebGLProgram(m,[s],s.dtype)}let MaxPool2DBackpropProgram=class MaxPool2DBackpropProgram{constructor(t){this.variableNames=["dy","maxPos"],this.outputShape=t.inShape;const r=t.strideHeight,a=t.strideWidth,n=t.dilationHeight,s=t.effectiveFilterHeight,i=t.effectiveFilterWidth,o=s-1-t.padInfo.top,l=i-1-t.padInfo.left;this.userCode=` const ivec2 pads = ivec2(${o}, ${l}); void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; int d = coords[3]; ivec2 dyRCCorner = coords.yz - pads; int dyRCorner = dyRCCorner.x; int dyCCorner = dyRCCorner.y; // Convolve dy(?, ?, d) with pos mask(:, :, d) to get dx(xR, xC, d). // ? = to be determined. : = across all values in that axis. 
float dotProd = 0.0; for (int wR = 0; wR < ${s}; wR += ${n}) { float dyR = float(dyRCorner + wR) / ${r}.0; if (dyR < 0.0 || dyR >= ${t.outHeight}.0 || fract(dyR) > 0.0) { continue; } int idyR = int(dyR); for (int wC = 0; wC < ${i}; wC++) { float dyC = float(dyCCorner + wC) / ${a}.0; if (dyC < 0.0 || dyC >= ${t.outWidth}.0 || fract(dyC) > 0.0) { continue; } int idyC = int(dyC); float dyValue = getDy(b, idyR, idyC, d); int maxPosValue = ${s*i-1} - int(getMaxPos(b, idyR, idyC, d)); // Get the current value, check it against the value from the // position matrix. int curPosValue = wR * ${i} + wC; float mask = float(maxPosValue == curPosValue ? 1.0 : 0.0); dotProd += dyValue * mask; } } setOutput(dotProd); } `}};let MaxPool3DBackpropProgram=class MaxPool3DBackpropProgram{constructor(t){this.variableNames=["dy","maxPos"],this.outputShape=t.inShape;const r=t.strideDepth,a=t.strideHeight,n=t.strideWidth,s=t.dilationDepth,i=t.dilationHeight,o=t.dilationWidth,l=t.effectiveFilterDepth,u=t.effectiveFilterHeight,p=t.effectiveFilterWidth,m=l-1-t.padInfo.front,y=u-1-t.padInfo.top,_=p-1-t.padInfo.left;this.userCode=` const ivec3 pads = ivec3(${m}, ${y}, ${_}); void main() { ivec5 coords = getOutputCoords(); int batch = coords.x; int ch = coords.u; ivec3 dyCorner = ivec3(coords.y, coords.z, coords.w) - pads; int dyDCorner = dyCorner.x; int dyRCorner = dyCorner.y; int dyCCorner = dyCorner.z; // Convolve dy(?, ?, ?, ch) with pos mask(:, :, :, d) to get // dx(xD, xR, xC, ch). // ? = to be determined. : = across all values in that axis. 
float dotProd = 0.0; for (int wD = 0; wD < ${l}; wD += ${s}) { float dyD = float(dyDCorner + wD) / ${r}.0; if (dyD < 0.0 || dyD >= ${t.outDepth}.0 || fract(dyD) > 0.0) { continue; } int idyD = int(dyD); for (int wR = 0; wR < ${u}; wR += ${i}) { float dyR = float(dyRCorner + wR) / ${a}.0; if (dyR < 0.0 || dyR >= ${t.outHeight}.0 || fract(dyR) > 0.0) { continue; } int idyR = int(dyR); for (int wC = 0; wC < ${p}; wC += ${o}) { float dyC = float(dyCCorner + wC) / ${n}.0; if (dyC < 0.0 || dyC >= ${t.outWidth}.0 || fract(dyC) > 0.0) { continue; } int idyC = int(dyC); float dyValue = getDy(batch, idyD, idyR, idyC, ch); int maxPosValue = ${l*u*p-1} - int(getMaxPos(batch, idyD, idyR, idyC, ch)); // Get the current value, check it against the value from the // position matrix. int curPosValue = wD * ${u} * ${p} + wR * ${p} + wC; float mask = float(maxPosValue == curPosValue ? 1.0 : 0.0); dotProd += dyValue * mask; } } } setOutput(dotProd); } `}};function MaxPool3DGrad_maxPool3DGrad(t){let{inputs:r,backend:a,attrs:n}=t,{dy:s,input:i}=r,{filterSize:o,strides:l,pad:u,dimRoundingMode:p}=n,m=computePool3DInfo(i.shape,o,l,[1,1,1],u,p),y=new Pool3DProgram(m,"max",!0),_=a.runWebGLProgram(y,[i],i.dtype),w=new MaxPool3DBackpropProgram(m),I=a.runWebGLProgram(w,[s,_],i.dtype);return a.disposeIntermediateTensorInfo(_),I}function kernels_MaxPoolGrad_maxPoolGrad(t){let{inputs:r,backend:a,attrs:n}=t,{dy:s,input:i,output:o}=r;webgl_util_assertNotComplex([i,o],"maxPoolGrad");let{filterSize:l,strides:u,pad:p,dimRoundingMode:m}=n,y=computePool2DInfo(i.shape,l,u,1,p,m),_=new Pool2DProgram(y,"max",!0),w=a.runWebGLProgram(_,[i],i.dtype),I=new MaxPool2DBackpropProgram(y),C=a.runWebGLProgram(I,[s,w],i.dtype);return a.disposeIntermediateTensorInfo(w),C}function MaxPoolWithArgmax_impl_maxPoolWithArgmaxImpl(t,r,a,n){let s=new Pool2DProgram(a,"max",!1),i=n.runWebGLProgram(s,[t],"float32");return s=new Pool2DProgram(a,"max",!0,!0,r),[i,n.runWebGLProgram(s,[t],"float32")]}function meanImpl(t,r,a,n){let 
s=sizeFromShape(r),i=sizeFromShape(t.shape),o=kernels_Reshape_reshape({inputs:{x:t},attrs:{shape:[i/s,s]},backend:n}),l=reduce(o,"float32","mean",n),u=kernels_Reshape_reshape({inputs:{x:l},attrs:{shape:a},backend:n});return n.disposeIntermediateTensorInfo(o),n.disposeIntermediateTensorInfo(l),u}function kernels_Min_min(t){let r,{inputs:a,backend:n,attrs:s}=t,{x:i}=a,{axis:o,keepDims:l}=s,u=i.shape.length,p=parseAxisParam(o,i.shape),m=p,y=getAxesPermutation(m,u),_=i;null!=y&&(_=kernels_Transpose_transpose({inputs:{x:i},backend:n,attrs:{perm:y}}),m=getInnerMostAxes(m.length,i.shape.length)),assertAxesAreInnerMostDims("min",m,u);let[w,I]=computeOutAndReduceShapes(_.shape,m),C=kernels_Reshape_reshape({inputs:{x:_},backend:n,attrs:{shape:[-1,sizeFromShape(I)]}}),E=reduce(C,C.dtype,"min",n);return r=l?kernels_Reshape_reshape({inputs:{x:E},backend:n,attrs:{shape:expandShapeToKeepDim(w,p)}}):kernels_Reshape_reshape({inputs:{x:E},backend:n,attrs:{shape:w}}),n.disposeIntermediateTensorInfo(C),n.disposeIntermediateTensorInfo(E),null!=y&&n.disposeIntermediateTensorInfo(_),r}let hQ=kernel_funcs_utils_binaryKernelFunc({opSnippet:ha+` return min(a, b); `,packedOpSnippet:` vec4 result = vec4(min(a, b)); bvec4 isNaNA = isnan(a); bvec4 isNaNB = isnan(b); bvec4 isNaN = bvec4(isNaNA.x || isNaNB.x, isNaNA.y || isNaNB.y, isNaNA.z || isNaNB.z, isNaNA.w || isNaNB.w); `+hn+` return result; `,cpuKernelImpl:pC});let MirrorPadProgram=class MirrorPadProgram{constructor(t,r,a){this.variableNames=["x"],this.outputShape=r.map((r,a)=>r[0]+t[a]+r[1]);const n=t.length,s=getCoordsDataType(n),i=r.map(t=>t[0]).join(","),o=r.map((r,a)=>r[0]+t[a]).join(","),l=["coords[0]","coords[1]","coords[2]","coords[3]"].slice(0,n),u=+("reflect"!==a);if(1===n){this.userCode=` int start = ${i}; int end = ${o}; void main() { int outC = getOutputCoords(); if (outC < start) { outC = start * 2 - outC - ${u}; } else if(outC >= end) { outC = (end - 1) * 2 - outC + ${u}; } setOutput(getX(outC - start)); } 
`;return}this.userCode=` ${s} start = ${s}(${i}); ${s} end = ${s}(${o}); void main() { ${s} outC = getOutputCoords(); for (int i = 0; i < ${n}; i++) { if (outC[i] < start[i]) { outC[i] = start[i] * 2 - outC[i] - ${u}; } else if(outC[i] >= end[i]) { outC[i] = (end[i] - 1) * 2 - outC[i] + ${u}; } } ${s} coords = outC - start; setOutput(getX(${l})); } `}};let MirrorPadPackedProgram=class MirrorPadPackedProgram{constructor(t,r,a){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=r.map((r,a)=>r[0]+t[a]+r[1]);const n=t.length,s=getCoordsDataType(n),i=r.map(t=>t[0]).join(","),o=r.map((r,a)=>r[0]+t[a]).join(","),l=getChannels("rc",n),u=getChannels("source",n),p=`${l[n-1]} < ${this.outputShape[n-1]}`,m=1===n?"source":`vec2(${u.slice(-2).join()})`,y=+("reflect"!==a);let _="";if(1===n){const t=` ${s} source = rc; if (source < start) { source = start * 2 - source - ${y}; } else if (source >= end) { source = (end - 1) * 2 - source + ${y}; } source -= start; `;_=` ${s} rc = outputLoc; ${t} result[0] = getChannel(getX(${u.join()}), ${m}); ${l[n-1]} += 1; if(${p}) { ${t} result[1] = getChannel(getX(${u.join()}), ${m}); } `}else{const t=` ${s} source = rc; ${s} lt = ${s}(lessThan(source, start)); ${s} gte = ${s}(greaterThanEqual(source, end)); ${s} orig = 1 - (lt + gte); source = orig * source + lt * (start * 2 - source - ${y}) + gte * ((end - 1) * 2 - source + ${y}); source -= start; `;_=` ${s} rc = outputLoc; ${t} result[0] = getChannel(getX(${u.join()}), ${m}); ${l[n-1]} += 1; if(${p}) { ${t} result[1] = getChannel(getX(${u.join()}), ${m}); } rc = outputLoc; ${l[n-2]} += 1; if(${l[n-2]} < ${this.outputShape[n-2]}) { ${t} result[2] = getChannel(getX(${u.join()}), ${m}); ${l[n-1]} += 1; if(${p}) { ${t} result[3] = getChannel(getX(${u.join()}), ${m}); } } `}this.userCode=` const ${s} start = ${s}(${i}); const ${s} end = ${s}(${o}); void main() { ${s} outputLoc = getOutputCoords(); vec4 result = vec4(0.); ${_} setOutput(result); } `}};let 
h0=kernel_funcs_utils_binaryKernelFunc({opSnippet:`if (b == 0.0) return NAN; return mod(a, b);`,packedOpSnippet:` vec4 result = mod(a, b); bvec4 isNaN = equal(b, vec4(0.0)); `+hn+` return result; `});let MultinomialProgram=class MultinomialProgram{constructor(t,r,a){this.variableNames=["probs"],this.customUniforms=[{name:"seed",type:"float"}],this.outputShape=[t,a],this.userCode=` void main() { ivec2 coords = getOutputCoords(); int batch = coords[0]; float r = random(seed); float cdf = 0.0; for (int i = 0; i < ${r-1}; i++) { cdf += getProbs(batch, i); if (r < cdf) { setOutput(float(i)); return; } } // If no other event happened, last event happened. setOutput(float(${r-1})); } `}};let h1=kernel_funcs_utils_binaryKernelFunc({opSnippet:` if (a == b) { return 1.0; }; return a / b;`,packedOpSnippet:` // vec4 one = vec4(equal(a, b)); // return one + (vec4(1.0) - one) * a / b; vec4 result = a / b; if(a.x == b.x) { result.x = 1.; } if(a.y == b.y) { result.y = 1.; } if(a.z == b.z) { result.z = 1.; } if(a.w == b.w) { result.w = 1.; } return result; `,checkOutOfBounds:!0}),h2="return a - b;",h3=kernel_funcs_utils_binaryKernelFunc({opSnippet:h2,packedOpSnippet:h2,supportsComplex:!0,cpuKernelImpl:pZ});function kernels_Softmax_softmax(t){let{inputs:r,backend:a,attrs:n}=t,{logits:s}=r,{dim:i}=n,o=parseAxisParam([i],s.shape),l=kernels_Max_max({inputs:{x:s},backend:a,attrs:{reductionIndices:o,keepDims:!1}}),u=expandShapeToKeepDim(l.shape,o),p=kernels_Reshape_reshape({inputs:{x:l},backend:a,attrs:{shape:u}}),m=h3({inputs:{a:s,b:p},backend:a}),y=hF({inputs:{x:m},backend:a}),_=kernels_Sum_sum({inputs:{x:y},backend:a,attrs:{axis:o,keepDims:!1}}),w=kernels_Reshape_reshape({inputs:{x:_},backend:a,attrs:{shape:u}}),I=h1({inputs:{a:y,b:w},backend:a});return a.disposeIntermediateTensorInfo(l),a.disposeIntermediateTensorInfo(p),a.disposeIntermediateTensorInfo(m),a.disposeIntermediateTensorInfo(y),a.disposeIntermediateTensorInfo(_),a.disposeIntermediateTensorInfo(w),I}function 
kernels_Multinomial_multinomial(t){let{inputs:r,backend:a,attrs:n}=t,{logits:s}=r,{numSamples:i,seed:o,normalized:l}=n,u=l?s:kernels_Softmax_softmax({inputs:{logits:s},backend:a,attrs:{dim:s.shape.length-1}}),p=new MultinomialProgram(u.shape[0],u.shape[1],i),m=a.runWebGLProgram(p,[u],"int32",[[o]]);return l||a.disposeIntermediateTensorInfo(u),m}let h4=p2+` return -x; `,h6=` vec4 result = -x; bvec4 isNaN = isnan(x); result.r = isNaN.r ? x.r : result.r; result.g = isNaN.g ? x.g : result.g; result.b = isNaN.b ? x.b : result.b; result.a = isNaN.a ? x.a : result.a; return result; `;function kernels_Neg_neg(t){let r,{inputs:a,backend:n}=t,{x:s}=a;if(n.shouldExecuteOnCPU([s])){let[t,r]=pA(n.texData.get(s.dataId).values,s.shape,s.dtype);return n.makeTensorInfo(r,s.dtype,t)}return r=eV.getBool("WEBGL_PACK_UNARY_OPERATIONS")?new UnaryOpPackedProgram(s.shape,h6):new UnaryOpProgram(s.shape,h4),n.runWebGLProgram(r,[s],s.dtype)}let h5=nonMaxSuppressionV3Impl;function NonMaxSuppressionV3_nonMaxSuppressionV3(t){warn("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");let{inputs:r,backend:a,attrs:n}=t,{boxes:s,scores:i}=r,{maxOutputSize:o,iouThreshold:l,scoreThreshold:u}=n,{selectedIndices:p}=h5(a.readSync(s.dataId),a.readSync(i.dataId),o,l,u);return a.makeTensorInfo([p.length],"int32",new Int32Array(p))}let h8=nonMaxSuppressionV4Impl;function NonMaxSuppressionV4_nonMaxSuppressionV4(t){warn("tf.nonMaxSuppression() in webgl locks the UI thread. 
Call tf.nonMaxSuppressionAsync() instead");let{inputs:r,backend:a,attrs:n}=t,{boxes:s,scores:i}=r,{maxOutputSize:o,iouThreshold:l,scoreThreshold:u,padToMaxOutputSize:p}=n,{selectedIndices:m,validOutputs:y}=h8(a.readSync(s.dataId),a.readSync(i.dataId),o,l,u,p);return[a.makeTensorInfo([m.length],"int32",new Int32Array(m)),a.makeTensorInfo([],"int32",new Int32Array([y]))]}let h7=nonMaxSuppressionV5Impl;function NonMaxSuppressionV5_nonMaxSuppressionV5(t){warn("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");let{inputs:r,backend:a,attrs:n}=t,{boxes:s,scores:i}=r,{maxOutputSize:o,iouThreshold:l,scoreThreshold:u,softNmsSigma:p}=n,{selectedIndices:m,selectedScores:y}=h7(a.readSync(s.dataId),a.readSync(i.dataId),o,l,u,p);return[a.makeTensorInfo([m.length],"int32",new Int32Array(m)),a.makeTensorInfo([y.length],"float32",new Float32Array(y))]}let OneHotProgram=class OneHotProgram{constructor(t,r,a,n){this.variableNames=["indices"],this.outputShape=[t,r],this.userCode=` void main() { ivec2 coords = getOutputCoords(); int index = round(getIndices(coords.x)); setOutput(mix(float(${n}), float(${a}), float(index == coords.y))); } `}};function kernels_ZerosLike_zerosLike(t){let{inputs:r,backend:a}=t,{x:n}=r;if("complex64"!==n.dtype)return kernels_Fill_fill({attrs:{shape:n.shape,dtype:n.dtype,value:"string"===n.dtype?"":0},backend:a});{let t=kernels_Real_real({inputs:{input:n},backend:a}),r=kernels_ZerosLike_zerosLike({inputs:{x:t},backend:a}),s=kernels_Imag_imag({inputs:{input:n},backend:a}),i=kernels_ZerosLike_zerosLike({inputs:{x:s},backend:a}),o=kernels_Complex_complex({inputs:{real:r,imag:i},backend:a});return a.disposeIntermediateTensorInfo(t),a.disposeIntermediateTensorInfo(r),a.disposeIntermediateTensorInfo(s),a.disposeIntermediateTensorInfo(i),o}}function kernels_OnesLike_onesLike(t){let{inputs:r,backend:a}=t,{x:n}=r;if("string"===n.dtype)throw Error("onesLike is not supported under string 
dtype");if("complex64"!==n.dtype)return kernels_Fill_fill({attrs:{shape:n.shape,dtype:n.dtype,value:1},backend:a});{let t=kernels_Real_real({inputs:{input:n},backend:a}),r=kernels_OnesLike_onesLike({inputs:{x:t},backend:a}),s=kernels_Imag_imag({inputs:{input:n},backend:a}),i=kernels_ZerosLike_zerosLike({inputs:{x:s},backend:a}),o=kernels_Complex_complex({inputs:{real:r,imag:i},backend:a});return a.disposeIntermediateTensorInfo(t),a.disposeIntermediateTensorInfo(r),a.disposeIntermediateTensorInfo(s),a.disposeIntermediateTensorInfo(i),o}}function Pack_pack(t){let{inputs:r,backend:a,attrs:n}=t,{axis:s}=n;if(1===r.length)return kernels_ExpandDims_expandDims({inputs:{input:r[0]},backend:a,attrs:{dim:s}});let i=r[0].shape,o=r[0].dtype;r.forEach(t=>{assertShapesMatch(i,t.shape,"All tensors passed to stack must have matching shapes"),assert(o===t.dtype,()=>"All tensors passed to stack must have matching dtypes")});let l=[],u=kernels_Concat_concat({inputs:r.map(t=>{let r=kernels_ExpandDims_expandDims({inputs:{input:t},backend:a,attrs:{dim:s}});return l.push(r),r}),backend:a,attrs:{axis:s}});return l.forEach(t=>a.disposeIntermediateTensorInfo(t)),u}let PadProgram=class PadProgram{constructor(t,r,a){this.variableNames=["x"],this.customUniforms=[{name:"value",type:"float"}],this.outputShape=r.map((r,a)=>r[0]+t[a]+r[1]);const n=t.length,s=getCoordsDataType(n),i=r.map(t=>t[0]).join(","),o=r.map((r,a)=>r[0]+t[a]).join(","),l=["coords[0]","coords[1]","coords[2]","coords[3]"].slice(0,n);if(1===n){this.userCode=` int start = ${i}; int end = ${o}; void main() { int outC = getOutputCoords(); if (outC < start || outC >= end) { setOutput(value); } else { setOutput(getX(outC - start)); } } `;return}this.userCode=` ${s} start = ${s}(${i}); ${s} end = ${s}(${o}); void main() { ${s} outC = getOutputCoords(); if (any(lessThan(outC, start)) || any(greaterThanEqual(outC, end))) { setOutput(value); } else { ${s} coords = outC - start; setOutput(getX(${l})); } } `}};let PadPackedProgram=class 
PadPackedProgram{constructor(t,r,a){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0,this.customUniforms=[{name:"value",type:"float"}],this.outputShape=r.map((r,a)=>r[0]+t[a]+r[1]);const n=t.length,s=getCoordsDataType(n),i=r.map(t=>t[0]).join(","),o=r.map((r,a)=>r[0]+t[a]).join(","),l=getChannels("rc",n),u=getChannels("source",n),p=`${l[n-1]} < ${this.outputShape[n-1]}`,m=1===n?"source":`vec2(${u.slice(-2).join()})`,y=[`${s} rc = outputLoc;`,`${l[n-1]} += 1; if(${p}) { `,1===n?"":`} rc = outputLoc; ${l[n-2]} += 1; if(${l[n-2]} < ${this.outputShape[n-2]}) {`,1===n?"":` ${l[n-1]} += 1; if(${p}) {`],_=1===n?"rc < start || rc >= end":"any(lessThan(rc, start)) || any(greaterThanEqual(rc, end))";let w="";for(let t=0,r=1===n?2:4;t{let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{paddings:i,constantValue:o}=n;if(0===sizeFromShape(s.shape))return kernels_Fill_fill({backend:a,attrs:{shape:i.map((t,r)=>t[0]+s.shape[r]+t[1]),value:o,dtype:s.dtype}});let l=eV.getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new PadPackedProgram(s.shape,i,o):new PadProgram(s.shape,i,o),u=[[o]];return a.runWebGLProgram(l,[s],s.dtype,u)},h9=kernel_funcs_utils_binaryKernelFunc({opSnippet:` if(a < 0.0 && floor(b) < b){ return NAN; } if (b == 0.0) { return 1.0; } return (round(mod(b, 2.0)) != 1) ? pow(abs(a), b) : sign(a) * pow(abs(a), b); `,packedOpSnippet:` // isModRound1 has 1 for components with round(mod(b, 2.0)) == 1, 0 otherwise. vec4 isModRound1 = vec4(equal(round(mod(b, 2.0)), ivec4(1))); vec4 multiplier = sign(a) * isModRound1 + (vec4(1.0) - isModRound1); vec4 result = multiplier * pow(abs(a), b); // Ensure that a^0 = 1, including 0^0 = 1 as this correspond to TF and JS bvec4 isExpZero = equal(b, vec4(0.0)); result.r = isExpZero.r ? 1.0 : result.r; result.g = isExpZero.g ? 1.0 : result.g; result.b = isExpZero.b ? 1.0 : result.b; result.a = isExpZero.a ? 
1.0 : result.a; bvec4 isNaN1 = lessThan(a, vec4(0.0)); bvec4 isNaN2 = lessThan(floor(b), b); bvec4 isNaN = bvec4(isNaN1.x && isNaN2.x, isNaN1.y && isNaN2.y, isNaN1.z && isNaN2.z, isNaN1.w && isNaN2.w); `+hn+` return result; `});function kernels_Prod_prod(t){let r,{inputs:a,backend:n,attrs:s}=t,{x:i}=a,{axis:o,keepDims:l}=s,u=i.shape.length,p=[],m=parseAxisParam(o,i.shape),y=m,_=getAxesPermutation(y,u),w=i;if(null!=_&&(w=kernels_Transpose_transpose({inputs:{x:i},backend:n,attrs:{perm:_}}),y=getInnerMostAxes(y.length,u),p.push(w)),assertAxesAreInnerMostDims("prod",y,u),n.shouldExecuteOnCPU([w])){let t=n.texData.get(w.dataId).values,{outVals:a,outShape:s,outDtype:i}=pR(w.shape,w.dtype,t,y);r=n.makeTensorInfo(s,i,a)}else{let[t,a]=computeOutAndReduceShapes(w.shape,y),s=kernels_Reshape_reshape({inputs:{x:w},backend:n,attrs:{shape:[-1,sizeFromShape(a)]}}),o=reduce(s,sumOutType(i.dtype),"prod",n);r=kernels_Reshape_reshape({inputs:{x:o},backend:n,attrs:{shape:t}}),p.push(s),p.push(o)}if(l){p.push(r);let t=expandShapeToKeepDim(r.shape,m);r=kernels_Reshape_reshape({inputs:{x:r},backend:n,attrs:{shape:t}})}return p.forEach(t=>n.disposeIntermediateTensorInfo(t)),r}function kernels_RaggedGather_raggedGather(t){let{inputs:r,backend:a,attrs:n}=t,{paramsNestedSplits:s,paramsDenseValues:i,indices:o}=r,{outputRaggedRank:l}=n,u=s.map(t=>a.readSync(t.dataId)),p=s.map(t=>t.shape),m=a.readSync(i.dataId),y=a.readSync(o.dataId),[_,w,I]=pF(u,p,m,i.shape,i.dtype,y,o.shape,l),C=_.map(t=>a.makeTensorInfo([t.length],"int32",t)),E=a.makeTensorInfo(I,i.dtype,w);return C.concat([E])}function kernels_RaggedRange_raggedRange(t){let{inputs:r,backend:a}=t,{starts:n,limits:s,deltas:i}=r,o=a.readSync(n.dataId),l=a.readSync(s.dataId),u=a.readSync(i.dataId),[p,m]=pD(o,n.shape,n.dtype,l,s.shape,u,i.shape);return[a.makeTensorInfo([p.length],"int32",p),a.makeTensorInfo([m.length],n.dtype,m)]}function 
kernels_RaggedTensorToTensor_raggedTensorToTensor(t){let{inputs:r,backend:a,attrs:n}=t,{shape:s,values:i,defaultValue:o,rowPartitionTensors:l}=r,{rowPartitionTypes:u}=n,p=a.readSync(s.dataId),m=a.readSync(i.dataId),y=a.readSync(o.dataId),_=l.map(t=>a.readSync(t.dataId)),w=l.map(t=>t.shape),[I,C]=pP(p,s.shape,m,i.shape,i.dtype,y,o.shape,_,w,u);return a.makeTensorInfo(I,i.dtype,C)}let kernels_Range_range=t=>{let{backend:r,attrs:a}=t,{start:n,stop:s,step:i,dtype:o}=a,l=pO(n,s,i,o);return r.makeTensorInfo([l.length],o,l)},ce=kernel_funcs_utils_unaryKernelFunc({opSnippet:"return 1.0 / x;"}),ct=kernel_funcs_utils_unaryKernelFunc({opSnippet:p2+` return (x < 0.0) ? 0.0 : x; `,packedOpSnippet:` vec4 result = x * vec4(greaterThanEqual(x, vec4(0.0))); bvec4 isNaN = isnan(x); result.r = isNaN.r ? x.r : result.r; result.g = isNaN.g ? x.g : result.g; result.b = isNaN.b ? x.b : result.b; result.a = isNaN.a ? x.a : result.a; return result; `}),cr=kernel_funcs_utils_unaryKernelFunc({opSnippet:p2+` return (x < 0.0) ? 0.0 : min(6.0, x); `,packedOpSnippet:` vec4 result = min(x, vec4(6.)) * vec4(greaterThanEqual(x, vec4(0.0))); bvec4 isNaN = isnan(x); result.r = isNaN.r ? x.r : result.r; result.g = isNaN.g ? x.g : result.g; result.b = isNaN.b ? x.b : result.b; result.a = isNaN.a ? x.a : result.a; return result; `});let ResizeBilinearProgram=class ResizeBilinearProgram{constructor(t,r,a,n,s){this.variableNames=["A"],this.outputShape=[];const[i,o,l,u]=t;this.outputShape=[i,r,a,u];const p=[n&&r>1?o-1:o,n&&a>1?l-1:l],m=[n&&r>1?r-1:r,n&&a>1?a-1:a];this.userCode=` const vec2 effectiveInputOverOutputRatioRC = vec2( ${p[0]/m[0]}, ${p[1]/m[1]}); const vec2 inputShapeRC = vec2(${o}.0, ${l}.0); void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; int d = coords[3]; ivec2 yRC = coords.yz; // Fractional source index. 
vec2 sourceFracIndexRC = ${s?"(vec2(yRC) + vec2(0.5)) * effectiveInputOverOutputRatioRC - vec2(0.5)":"vec2(yRC) * effectiveInputOverOutputRatioRC"}; // Compute the four integer indices. ivec2 sourceFloorRC = ivec2(max(sourceFracIndexRC, vec2(0.0))); ivec2 sourceCeilRC = ivec2( min(inputShapeRC - 1.0, ceil(sourceFracIndexRC))); float topLeft = getA(b, sourceFloorRC.x, sourceFloorRC.y, d); float bottomLeft = getA(b, sourceCeilRC.x, sourceFloorRC.y, d); float topRight = getA(b, sourceFloorRC.x, sourceCeilRC.y, d); float bottomRight = getA(b, sourceCeilRC.x, sourceCeilRC.y, d); vec2 fracRC = sourceFracIndexRC - vec2(sourceFloorRC); float top = topLeft + (topRight - topLeft) * fracRC.y; float bottom = bottomLeft + (bottomRight - bottomLeft) * fracRC.y; float newValue = top + (bottom - top) * fracRC.x; setOutput(newValue); } `}};let ResizeBilinearPackedProgram=class ResizeBilinearPackedProgram{constructor(t,r,a,n,s){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=[];const[i,o,l,u]=t;this.outputShape=[i,r,a,u];const p=[n&&r>1?o-1:o,n&&a>1?l-1:l],m=[n&&r>1?r-1:r,n&&a>1?a-1:a];this.userCode=` const vec3 effectiveInputOverOutputRatioRC = vec3( ${p[0]/m[0]}, ${p[1]/m[1]}, ${p[1]/m[1]}); const vec3 inputShapeRC = vec3(${o}.0, ${l}.0, ${l}.0); float getAValue(int b, int r, int c, int d) { return getChannel(getA(b, r, c, d), vec2(c, d)); } void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; int d = coords[3]; // Calculate values for next column in yRC.z. ivec3 yRC = coords.yzz + ivec3(0, 0, 1); // Fractional source index. vec3 sourceFracIndexRC = ${s?"(vec3(yRC) + vec3(0.5)) * effectiveInputOverOutputRatioRC - vec3(0.5)":"vec3(yRC) * effectiveInputOverOutputRatioRC"}; // Compute the four integer indices. ivec3 sourceFloorRC = ivec3(max(sourceFracIndexRC, vec3(0.0))); ivec3 sourceCeilRC = ivec3( min(inputShapeRC - 1.0, ceil(sourceFracIndexRC))); // Should we calculate next column and row elements in 2x2 packed cell. 
bool hasNextCol = d < ${u-1}; bool hasNextRow = coords.z < ${a-1}; // In parallel, construct four corners for all four components in // packed 2x2 cell. vec4 topLeft = vec4( getAValue(b, sourceFloorRC.x, sourceFloorRC.y, d), hasNextCol ? getAValue(b, sourceFloorRC.x, sourceFloorRC.y, d + 1) : 0.0, hasNextRow ? getAValue(b, sourceFloorRC.x, sourceFloorRC.z, d) : 0.0, (hasNextRow && hasNextCol) ? getAValue(b, sourceFloorRC.x, sourceFloorRC.z, d + 1) : 0.0); vec4 bottomLeft = vec4( getAValue(b, sourceCeilRC.x, sourceFloorRC.y, d), hasNextCol ? getAValue(b, sourceCeilRC.x, sourceFloorRC.y, d + 1) : 0.0, hasNextRow ? getAValue(b, sourceCeilRC.x, sourceFloorRC.z, d) : 0.0, (hasNextRow && hasNextCol) ? getAValue(b, sourceCeilRC.x, sourceFloorRC.z, d + 1) : 0.0); vec4 topRight = vec4( getAValue(b, sourceFloorRC.x, sourceCeilRC.y, d), hasNextCol ? getAValue(b, sourceFloorRC.x, sourceCeilRC.y, d + 1) : 0.0, hasNextRow ? getAValue(b, sourceFloorRC.x, sourceCeilRC.z, d) : 0.0, (hasNextRow && hasNextCol) ? getAValue(b, sourceFloorRC.x, sourceCeilRC.z, d + 1) : 0.0); vec4 bottomRight = vec4( getAValue(b, sourceCeilRC.x, sourceCeilRC.y, d), hasNextCol ? getAValue(b, sourceCeilRC.x, sourceCeilRC.y, d + 1) : 0.0, hasNextRow ? getAValue(b, sourceCeilRC.x, sourceCeilRC.z, d) : 0.0, (hasNextRow && hasNextCol) ? 
getAValue(b, sourceCeilRC.x, sourceCeilRC.z, d + 1) : 0.0); vec3 fracRC = sourceFracIndexRC - vec3(sourceFloorRC); vec4 top = mix(topLeft, topRight, fracRC.yyzz); vec4 bottom = mix(bottomLeft, bottomRight, fracRC.yyzz); vec4 newValue = mix(top, bottom, fracRC.x); setOutput(newValue); } `}};function kernels_ResizeBilinear_resizeBilinear(t){let{inputs:r,backend:a,attrs:n}=t,{images:s}=r,{alignCorners:i,halfPixelCenters:o,size:l}=n,[u,p]=l,m=eV.getBool("WEBGL_PACK_IMAGE_OPERATIONS")?new ResizeBilinearPackedProgram(s.shape,u,p,i,o):new ResizeBilinearProgram(s.shape,u,p,i,o);return a.runWebGLProgram(m,[s],"float32")}let ResizeBilinearBackpropProgram=class ResizeBilinearBackpropProgram{constructor(t,r,a){this.variableNames=["dy"],this.outputShape=[],this.outputShape=r;const[,n,s]=r,[,i,o]=t,l=[a&&i>1?n-1:n,a&&o>1?s-1:s],u=[a&&i>1?i-1:i,a&&o>1?o-1:o],p=l[0]/u[0],m=l[1]/u[1],y=1/p,_=1/m,w=2*Math.ceil(y)+2,I=2*Math.ceil(_)+2;this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; int d = coords[3]; int r = coords[1]; int c = coords[2]; float accumulator = 0.0; const float heightScale = float(${p}); const float widthScale = float(${m}); const float invHeightScale = float(${y}); const float invWidthScale = float(${_}); const int winHeight = int(${w}); const int winWidth = int(${I}); // Compute bounds for where in dy we will look float startRLerp = floor(float(r) * invHeightScale); int startDyR = int(startRLerp - float(winHeight / 2)); float startCLerp = floor(float(c) * invWidthScale); int startDyC = int(startCLerp - float(winWidth / 2)); // Loop over dy for (int dyROffset = 0; dyROffset < winHeight; dyROffset++) { int dyR = dyROffset + startDyR; // Guard against the window exceeding the bounds of dy if (dyR < 0 || dyR >= ${i}) { continue; } for (int dyCOffset = 0; dyCOffset < winWidth; dyCOffset++) { int dyC = dyCOffset + startDyC; // Guard against the window exceeding the bounds of dy if (dyC < 0 || dyC >= ${o}) { continue; } float dxR = 
float(dyR) * heightScale; int topDxRIndex = int(floor(dxR)); int bottomDxRIndex = int(min(ceil(dxR), ${n-1}.0)); float dxRLerp = dxR - float(topDxRIndex); float inverseDxRLerp = 1.0 - dxRLerp; float dxC = float(dyC) * widthScale; int leftDxCIndex = int(floor(dxC)); int rightDxCIndex = int(min(ceil(dxC), ${s-1}.0)); float dxCLerp = dxC - float(leftDxCIndex); float inverseDxCLerp = 1.0 - dxCLerp; if (r == topDxRIndex && c == leftDxCIndex) { // topLeft accumulator += getDy(b, dyR, dyC, d) * inverseDxRLerp * inverseDxCLerp; } if (r == topDxRIndex && c == rightDxCIndex) { // topRight accumulator += getDy(b, dyR, dyC, d) * inverseDxRLerp * dxCLerp; } if (r == bottomDxRIndex && c == leftDxCIndex) { // bottomLeft accumulator += getDy(b, dyR, dyC, d) * dxRLerp * inverseDxCLerp; } if (r == bottomDxRIndex && c == rightDxCIndex) { // bottomRight accumulator += getDy(b, dyR, dyC, d) * dxRLerp * dxCLerp; } } } // End loop over dy setOutput(accumulator); } `}};function ResizeBilinearGrad_resizeBilinearGrad(t){let{inputs:r,backend:a,attrs:n}=t,{images:s,dy:i}=r,{alignCorners:o}=n,l=new ResizeBilinearBackpropProgram(i.shape,s.shape,o);return a.runWebGLProgram(l,[i],i.dtype)}let ResizeNearestNeighborProgram=class ResizeNearestNeighborProgram{constructor(t,r,a,n,s){this.variableNames=["A"],this.outputShape=[];const[i,o,l,u]=t;this.outputShape=[i,r,a,u];const p=[n&&r>1?o-1:o,n&&a>1?l-1:l],m=[n&&r>1?r-1:r,n&&a>1?a-1:a];this.userCode=` const vec2 effectiveInputOverOutputRatioRC = vec2( ${p[0]/m[0]}, ${p[1]/m[1]}); const vec2 inputShapeRC = vec2(${o}.0, ${l}.0); void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; int d = coords[3]; ivec2 yRC = coords.yz; // Fractional source index. vec2 sourceFracIndexRC = ${s?"max((vec2(yRC) + vec2(0.5)) * effectiveInputOverOutputRatioRC, vec2(0.0))":"vec2(yRC) * effectiveInputOverOutputRatioRC"}; // Compute the coordinators of nearest neighbor point. 
ivec2 sourceNearestRC = ivec2( min(inputShapeRC - 1.0, floor(sourceFracIndexRC + ${n?"0.5":"0.0"}))); float newValue = getA(b, sourceNearestRC.x, sourceNearestRC.y, d); setOutput(newValue); } `}};let ResizeNearestNeighborPackedProgram=class ResizeNearestNeighborPackedProgram{constructor(t,r,a,n,s){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=[];const[i,o,l,u]=t;this.outputShape=[i,r,a,u];const p=[n&&r>1?o-1:o,n&&a>1?l-1:l],m=[n&&r>1?r-1:r,n&&a>1?a-1:a];this.userCode=` const vec3 effectiveInputOverOutputRatioRC = vec3( ${p[0]/m[0]}, ${p[1]/m[1]}, ${p[1]/m[1]}); const vec3 inputShapeRC = vec3(${o}.0, ${l}.0, ${l}.0); float getAValue(int b, int r, int c, int d) { return getChannel(getA(b, r, c, d), vec2(c, d)); } void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; int d = coords[3]; // Calculate values for next column in yRC.z. ivec3 yRC = coords.yzz + ivec3(0, 0, 1); // Fractional source index. vec3 sourceFracIndexRC = ${s?"max((vec3(yRC) + vec3(0.5)) * effectiveInputOverOutputRatioRC, vec3(0.0))":"vec3(yRC) * effectiveInputOverOutputRatioRC"}; // Compute the coordinators of nearest neighbor point. ivec3 sourceNearestRC = ivec3( min(inputShapeRC - 1.0, floor(sourceFracIndexRC + ${n?"0.5":"0.0"}))); // Should we calculate next column and row elements in 2x2 packed cell. bool hasNextCol = d < ${u-1}; bool hasNextRow = coords.z < ${a-1}; vec4 newValue = vec4( getAValue(b, sourceNearestRC.x, sourceNearestRC.y, d), hasNextCol ? getAValue(b, sourceNearestRC.x, sourceNearestRC.y, d + 1) : 0.0, hasNextRow ? getAValue(b, sourceNearestRC.x, sourceNearestRC.z, d) : 0.0, (hasNextRow && hasNextCol) ? 
getAValue(b, sourceNearestRC.x, sourceNearestRC.z, d + 1) : 0.0); setOutput(newValue); } `}};function kernels_ResizeNearestNeighbor_resizeNearestNeighbor(t){let{inputs:r,backend:a,attrs:n}=t,{images:s}=r,{alignCorners:i,halfPixelCenters:o,size:l}=n,[u,p]=l,m=eV.getBool("WEBGL_PACK_IMAGE_OPERATIONS")?new ResizeNearestNeighborPackedProgram(s.shape,u,p,i,o):new ResizeNearestNeighborProgram(s.shape,u,p,i,o);return a.runWebGLProgram(m,[s],s.dtype)}let ResizeNearestNeigborBackpropProgram=class ResizeNearestNeigborBackpropProgram{constructor(t,r,a){this.variableNames=["dy"],this.outputShape=[],this.outputShape=r;const[,n,s]=r,[,i,o]=t,l=[a&&i>1?n-1:n,a&&o>1?s-1:s],u=[a&&i>1?i-1:i,a&&o>1?o-1:o],p=l[0]/u[0],m=l[1]/u[1],y=1/p,_=1/m,w=2*Math.ceil(y)+2,I=2*Math.ceil(_)+2;this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; int d = coords[3]; int r = coords[1]; int c = coords[2]; float accumulator = 0.0; const float heightScale = float(${p}); const float widthScale = float(${m}); const float invHeightScale = float(${y}); const float invWidthScale = float(${_}); const int winHeight = int(${w}); const int winWidth = int(${I}); // Compute bounds for where in dy we will look float startRLerp = floor(float(r) * invHeightScale); int startDyR = int(floor(startRLerp - float(winHeight / 2))); float startCLerp = floor(float(c) * invWidthScale); int startDyC = int(floor(startCLerp - float(winWidth / 2))); // Loop over dy for (int dyROffset = 0; dyROffset < winHeight; dyROffset++) { int dyR = dyROffset + startDyR; // Guard against the window exceeding the bounds of dy if (dyR < 0 || dyR >= ${i}) { continue; } for (int dyCOffset = 0; dyCOffset < winWidth; dyCOffset++) { int dyC = dyCOffset + startDyC; // Guard against the window exceeding the bounds of dy if (dyC < 0 || dyC >= ${o}) { continue; } float sourceFracRow = float(${l[0]}) * (float(dyR) / float(${u[0]})); float sourceFracCol = float(${l[1]}) * (float(dyC) / float(${u[1]})); int sourceNearestRow = 
int(min( float(int(${n}) - 1), ${a} ? float(round(sourceFracRow)) : float(floor(sourceFracRow)))); int sourceNearestCol = int(min( float(int(${s}) - 1), ${a} ? float(round(sourceFracCol)) : float(floor(sourceFracCol)))); if (r == sourceNearestRow && c == sourceNearestCol) { accumulator += getDy(b, dyR, dyC, d); } } } // End loop over dy setOutput(accumulator); } `}};function ResizeNearestNeighborGrad_resizeNearestNeighborGrad(t){let{inputs:r,backend:a,attrs:n}=t,{images:s,dy:i}=r,{alignCorners:o}=n,l=new ResizeNearestNeigborBackpropProgram(i.shape,s.shape,o);return a.runWebGLProgram(l,[i],i.dtype)}let ReverseProgram=class ReverseProgram{constructor(t,r){this.variableNames=["x"];const a=t.length;if(a>4)throw Error(`WebGL backend: Reverse of rank-${a} tensor is not yet supported`);if(this.outputShape=t,1===a){this.userCode=` void main() { int coord = getOutputCoords(); setOutput(getX(${t[0]} - coord - 1)); } `;return}const n=t.map((a,n)=>-1!==r.indexOf(n)&&1!==t[n]?`${t[n]} - coords[${n}] - 1`:`coords[${n}]`).join(","),s=getCoordsDataType(a);this.userCode=` void main() { ${s} coords = getOutputCoords(); setOutput(getX(${n})); } `}};let ReversePackedProgram=class ReversePackedProgram{constructor(t,r){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0;const a=t.length;if(a>4)throw Error(`WebGL backend: Reverse of rank-${a} tensor is not yet supported`);this.outputShape=t;const n=getChannels("rc",a),s=`${n[a-1]} + 1 < ${this.outputShape[a-1]}`,i=`${n[a-2]} + 1 < ${this.outputShape[a-2]}`,o=getCoordsDataType(a);function getR(t){return getChannel(t)}function getG(t){return t[a-1]="("+t[a-1]+" + 1)",getChannel(t)}function getB(t){return t[a-2]="("+t[a-2]+" + 1)",getChannel(t)}function getA(t){return t[a-1]="("+t[a-1]+" + 1)",t[a-2]="("+t[a-2]+" + 1)",getChannel(t)}function getChannel(r){let a=t.map((t,a)=>getInCoord(a,r)),n=a.join(","),s=a.slice(-2).join(",");return`getChannel(getX(${n}), vec2(${s}))`}function getInCoord(a,n){return 
-1!==r.indexOf(a)&&1!==t[a]?`${t[a]} - ${n[a]} - 1`:`${n[a]}`}1===a?this.userCode=` void main(){ int rc = getOutputCoords(); vec4 result = vec4(0.); result.r = getChannel(getX(${t[0]} - rc - 1), ${t[0]} - rc - 1); if(${s}){ result.g = getChannel(getX(${t[0]} - (rc + 1) - 1), ${t[0]} - (rc + 1) - 1); } setOutput(result); } `:this.userCode=` void main() { ${o} rc = getOutputCoords(); vec4 result = vec4(0.); result.r = ${getR(n.slice())}; if(${s}){ result.g = ${getG(n.slice())}; } if(${i}) { result.b = ${getB(n.slice())}; if(${s}) { result.a = ${getA(n.slice())}; } } setOutput(result); } `}};function kernels_Reverse_reverse(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{dims:i}=n,o=s.shape.length,l=parseAxisParam(i,s.shape);if(0===o)return kernels_Identity_identity({inputs:{x:s},backend:a});let u=eV.getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new ReversePackedProgram(s.shape,l):new ReverseProgram(s.shape,l);return a.runWebGLProgram(u,[s],s.dtype)}let RotateProgram=class RotateProgram{constructor(t,r){this.variableNames=["Image"],this.outputShape=[],this.customUniforms=[{name:"params",type:"vec4"}];const a=t[1],n=t[2];this.outputShape=t;let s="";s="number"==typeof r?`float outputValue = ${r.toFixed(2)};`:` vec3 fill = vec3(${r.join(",")}); float outputValue = fill[coords[3]];`,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int x = coords[2]; int y = coords[1]; float coordXFloat = (float(x) - params[0]) * params[3] - (float(y) - params[1]) * params[2]; float coordYFloat = (float(x) - params[0]) * params[2] + (float(y) - params[1]) * params[3]; int coordX = int(round(coordXFloat + params[0])); int coordY = int(round(coordYFloat + params[1])); ${s} if(coordX >= 0 && coordX < ${n} && coordY >= 0 && coordY < ${a}) { outputValue = getImage(coords[0], coordY, coordX, coords[3]); } setOutput(outputValue); } `}};let ca=kernel_funcs_utils_unaryKernelFunc({opSnippet:` // OpenGL ES does not support round function. // The algorithm is based on banker's rounding. 
float base = floor(x); if ((x - base) < 0.5) { return floor(x); } else if ((x - base) > 0.5) { return ceil(x); } else { if (mod(base, 2.0) == 0.0) { return base; } else { return base + 1.0; } } `}),cn=kernel_funcs_utils_unaryKernelFunc({opSnippet:"return inversesqrt(x);",cpuKernelImpl:pM});let ScatterProgram=class ScatterProgram{constructor(t,r,a,n,s,i,o=!0,l=!1){this.variableNames=["updates","indices","defaultValue"],this.outputShape=i;const u=getCoordsDataType(s.length),p=getCoordsDataType(i.length);let m="";1===a?m="i":2===a&&(m="i, j");const y=`getIndices(${m})`;let _="";1===n?_="i":2===n&&(_="i, coords[1]");const w=`getUpdates(${_})`;let I="";l&&(I="coords[0], coords[1]");const C=`getDefaultValue(${I})`;this.userCode=` ${u} strides = ${u}(${s}); void main() { ${p} coords = getOutputCoords(); float sum = 0.0; bool found = false; for (int i = 0; i < ${t}; i++) { int flattenedIndex = 0; for (int j = 0; j < ${r}; j++) { int index = round(${y}); flattenedIndex += index * ${r>1?"strides[j]":"strides"}; } if (flattenedIndex == coords[0]) { sum += ${w}; found = true; } } setOutput(mix(${C}, sum, float(found))); } `}};let ScatterPackedProgram=class ScatterPackedProgram{constructor(t,r,a,n,s,i,o=!0,l=!1){this.variableNames=["updates","indices","defaultValue"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=i;const u=getCoordsDataType(s.length),p=getCoordsDataType(i.length);let m="";1===a?m="i":2===a&&(m="i, j");const y=`getIndices(${m})`;let _="";1===n?_="i":2===n&&(_="i, coords[1]");const w=`getUpdates(${_})`;let I="";l&&(I="coords[0], coords[1]");const C=`getDefaultValue(${I})`;this.userCode=` ${u} strides = ${u}(${s}); void main() { ${p} coords = getOutputCoords(); vec4 sum = vec4(0.); vec4 found = vec4(0.); for (int i = 0; i < ${t}; i+=2) { ivec2 flattenedIndex = ivec2(0); for (int j = 0; j < ${r}; j+=2) { ivec4 index = round(${y}); flattenedIndex += index.xz * ${r>1?"strides[j]":"strides"}; if (j + 1 < ${r}) { flattenedIndex += index.yw * 
${r>1?"strides[j + 1]":"strides"}; } } if (flattenedIndex[0] == coords[0] || flattenedIndex[1] == coords[0] || flattenedIndex[0] == coords[0] + 1 || flattenedIndex[1] == coords[0] + 1) { vec4 updVals = ${w}; if (flattenedIndex[0] == coords[0]) { sum.xy += updVals.xy; found.xy = vec2(1.); } else if (flattenedIndex[0] == coords[0] + 1) { sum.zw += updVals.xy; found.zw = vec2(1.); } if (flattenedIndex[1] == coords[0]) { sum.xy += updVals.zw; found.xy = vec2(1.); } else if (flattenedIndex[1] == coords[0] + 1) { sum.zw += updVals.zw; found.zw = vec2(1.); } } } setOutput(mix(${C}, sum, found)); } `}};function ScatterNd_scatterNd(t){let r,{inputs:a,backend:n,attrs:s}=t,{indices:i,updates:o}=a,{shape:l}=s,{sliceRank:u,numUpdates:p,sliceSize:m,strides:y,outputSize:_}=calculateShapes(o,i,l),w=[_/m,m];if(0===_)return n.makeTensorInfo(l,i.dtype);let I=kernels_Reshape_reshape({inputs:{x:i},backend:n,attrs:{shape:[p,u]}}),C=kernels_Reshape_reshape({inputs:{x:o},backend:n,attrs:{shape:[p,m]}}),E=n.makeTensorInfo([],"float32",new Float32Array([0]));r=eV.getBool("WEBGL_PACK")?new ScatterPackedProgram(p,u,I.shape.length,C.shape.length,y,w):new ScatterProgram(p,u,I.shape.length,C.shape.length,y,w);let A=n.runWebGLProgram(r,[C,I,E],C.dtype),$=kernels_Reshape_reshape({inputs:{x:A},backend:n,attrs:{shape:l}});return n.disposeIntermediateTensorInfo(I),n.disposeIntermediateTensorInfo(C),n.disposeIntermediateTensorInfo(A),n.disposeIntermediateTensorInfo(E),$}let SearchSortedProgram=class SearchSortedProgram{constructor(t,r,a,n){this.variableNames=["sortedSequence","values"],this.customUniforms=[{name:"numInputs",type:"int"}],this.outputShape=[t,a];const s=`for (int i = 0; i < ${Math.ceil(Math.log2(r+1))}; ++i) { if (left >= right) break;`,i=2===eV.getNumber("WEBGL_VERSION")?"while (left < right) {":s;this.userCode=` int findBound(int batch, float value) { int left = 0; int right = numInputs; int mid; ${i} mid = (left + right) / 2; if (getSortedSequence(batch, mid) ${"left"===n?"<":"<="} 
value) { left = mid + 1; } else { right = mid; } } return right; } void main() { ivec2 coords = getOutputCoords(); int batch = coords[0]; int valueIndex = coords[1]; float value = getValues(batch, valueIndex); setOutput(float(findBound(batch, value))); } `}};function kernels_SearchSorted_searchSorted(t){let{inputs:r,backend:a,attrs:n}=t,{sortedSequence:s,values:i}=r,{side:o}=n,l=new SearchSortedProgram(s.shape[0],s.shape[1],i.shape[1],o),u=[[s.shape[1]]];return a.runWebGLProgram(l,[s,i],"int32",u)}let SelectProgram=class SelectProgram{constructor(t,r,a){let n,s;if(this.variableNames=["c","a","b"],this.outputShape=r,a>4)throw Error(`Where for rank ${a} is not yet supported`);if(1===a)s="resRC",n="resRC";else{const a=["resRC.x","resRC.y","resRC.z","resRC.w"],i=[],o=[];for(let n=0;n= 1.0) { setOutput(getA(${s})); } else { setOutput(getB(${s})); } } `}};function kernels_Select_select(t){let{inputs:r,backend:a}=t,{condition:n,t:s,e:i}=r,o=new SelectProgram(n.shape.length,s.shape,s.shape.length);return a.runWebGLProgram(o,[n,s,i],upcastType(s.dtype,i.dtype))}let cs=kernel_funcs_utils_unaryKernelFunc({opSnippet:` // Stable and Attracting Fixed Point (0, 1) for Normalized Weights. // see: https://arxiv.org/abs/1706.02515 float scaleAlpha = ${oT}; float scale = ${ok}; return (x >= 0.0) ? scale * x : scaleAlpha * (exp(x) - 1.0); `}),ci=kernel_funcs_utils_unaryKernelFunc({opSnippet:hu+` return 1.0 / (1.0 + exp(-1.0 * x)); `,packedOpSnippet:` vec4 result = 1.0 / (1.0 + exp(-1.0 * x)); bvec4 isNaN = isnan(x); result.r = isNaN.r ? x.r : result.r; result.g = isNaN.g ? x.g : result.g; result.b = isNaN.b ? x.b : result.b; result.a = isNaN.a ? 
x.a : result.a; return result; `,cpuKernelImpl:pz}),co=kernel_funcs_utils_unaryKernelFunc({opSnippet:` if (isnan(x)) { return 0.0; } return sign(x); `}),cl=kernel_funcs_utils_unaryKernelFunc({opSnippet:hu+` return sin(x); `,packedOpSnippet:` vec4 result = sin(x); bvec4 isNaN = isnan(x); ${hn} return result; `}),cu=kernel_funcs_utils_unaryKernelFunc({opSnippet:` float e2x = exp(x); return (e2x - 1.0 / e2x) / 2.0; `}),cp=kernel_funcs_utils_unaryKernelFunc({opSnippet:` float epsilon = 1.1920928955078125e-7; float threshold = log(epsilon) + 2.0; bool too_large = x > -threshold; bool too_small = x < threshold; float result; float exp_x = exp(x); if (too_large){ result = x; } else if (too_small){ result = exp_x; } else{ result = log(exp_x + 1.0); } return result; `});function kernels_SparseFillEmptyRows_sparseFillEmptyRows(t){let{inputs:r,backend:a}=t,{indices:n,values:s,denseShape:i,defaultValue:o}=r;if(1!==i.shape.length)throw Error(`Dense shape must be a vector, saw: ${i.shape}`);if(2!==n.shape.length)throw Error(`Indices must be a matrix, saw: ${n.shape}`);if(1!==s.shape.length)throw Error(`Values must be a vector, saw: ${s.shape}`);if(0!==o.shape.length)throw Error(`Default value must be a scalar, saw: ${o.shape}`);let l=a.readSync(n.dataId),u=a.readSync(s.dataId),p=a.readSync(i.dataId),m=a.readSync(o.dataId)[0],[y,_,w,I,C]=pW(l,n.shape,n.dtype,u,s.dtype,p,m);return[a.makeTensorInfo(_,n.dtype,y),a.makeTensorInfo([_[0]],s.dtype,w),a.makeTensorInfo([I.length],"bool",new Uint8Array(I.map(t=>Number(t)))),a.makeTensorInfo([C.length],n.dtype,new Int32Array(C))]}function kernels_SparseReshape_sparseReshape(t){let{inputs:r,backend:a}=t,{inputIndices:n,inputShape:s,newShape:i}=r;if(2!==n.shape.length)throw Error(`Input indices should be a matrix but received shape ${n.shape}`);if(1!==s.shape.length)throw Error(`Input shape should be a vector but received shape ${s.shape}`);if(1!==i.shape.length)throw Error(`Target shape should be a vector but received shape ${i.shape}`);let 
o=Array.from(a.readSync(s.dataId)),l=a.readSync(n.dataId),u=Array.from(a.readSync(i.dataId)),[p,m,y]=pU(l,n.shape,n.dtype,o,u);return[a.makeTensorInfo(m,n.dtype,p),a.makeTensorInfo([y.length],i.dtype,new Int32Array(y))]}function kernels_SparseSegmentMean_sparseSegmentMean(t){let{inputs:r,backend:a}=t,{data:n,indices:s,segmentIds:i}=r;if(n.shape.length<1)throw Error("Data should be at least 1 dimensional but received scalar");if(1!==s.shape.length)throw Error(`Indices should be a vector but received shape ${s.shape}`);if(1!==i.shape.length)throw Error(`Segment ids should be a vector but received shape ${i.shape}`);let o=a.readSync(n.dataId),l=a.readSync(s.dataId),u=a.readSync(i.dataId),[p,m]=pG(o,n.shape,n.dtype,l,u,!0);return a.makeTensorInfo(m,n.dtype,p)}function kernels_SparseSegmentSum_sparseSegmentSum(t){let{inputs:r,backend:a}=t,{data:n,indices:s,segmentIds:i}=r;if(n.shape.length<1)throw Error("Data should be at least 1 dimensional but received scalar");if(1!==s.shape.length)throw Error(`Indices should be a vector but received shape ${s.shape}`);if(1!==i.shape.length)throw Error(`Segment ids should be a vector but received shape ${i.shape}`);let o=a.readSync(n.dataId),l=a.readSync(s.dataId),u=a.readSync(i.dataId),[p,m]=pG(o,n.shape,n.dtype,l,u);return a.makeTensorInfo(m,n.dtype,p)}function kernels_SparseToDense_sparseToDense(t){let{inputs:r,backend:a,attrs:n}=t,{sparseIndices:s,sparseValues:i,defaultValue:o}=r,{outputShape:l}=n,{sliceRank:u,numUpdates:p,sliceSize:m,strides:y,outputSize:_}=calculateShapes(i,s,l);if("string"===i.dtype){let t=pL(a.bufferSync(s),a.bufferSync(i),l,_,m,p,u,y,decodeString(a.readSync(o.dataId)[0]),!1);return a.makeTensorInfo(l,t.dtype,t.values)}let w=new ScatterProgram(p,u,s.shape.length,i.shape.length,y,[_,1],!1),I=a.runWebGLProgram(w,[i,s,o],i.dtype),C=kernels_Reshape_reshape({inputs:{x:I},backend:a,attrs:{shape:l}});return a.disposeIntermediateTensorInfo(I),C}function 
SplitV_splitV(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{numOrSizeSplits:i,axis:o}=n,l=parseAxisParam(o,s.shape)[0],u=prepareSplitSize(s,i,l),p=Array(s.shape.length).fill(0),m=s.shape.slice();return u.map(t=>{let r=[...m];r[l]=t;let n=kernels_Slice_slice({inputs:{x:s},backend:a,attrs:{begin:p,size:r}});return p[l]+=t,n})}let ch="return sqrt(x);",cc=kernel_funcs_utils_unaryKernelFunc({opSnippet:ch,packedOpSnippet:ch,cpuKernelImpl:pj}),cd=kernel_funcs_utils_unaryKernelFunc({opSnippet:"return x * x;"}),cm="return (a - b) * (a - b);",cf=kernel_funcs_utils_binaryKernelFunc({opSnippet:cm,packedOpSnippet:cm});function kernels_StaticRegexReplace_staticRegexReplace(t){let{inputs:r,backend:a,attrs:n}=t,{x:s}=r;if("string"!==s.dtype)throw Error("Input must be of datatype string");let i=pK(fromUint8ToStringArray(a.readSync(s.dataId)),"string",n);return a.makeTensorInfo(s.shape,"string",i)}function kernels_Step_step({inputs:t,attrs:r,backend:a}){let{x:n}=t,s=p2+` return x > 0.0 ? 1.0 : float(${r.alpha}); `,i=new UnaryOpProgram(n.shape,s);return a.runWebGLProgram(i,[n],n.dtype)}let StridedSliceProgram=class StridedSliceProgram{constructor(t,r,a){this.variableNames=["x"],this.outputShape=a;const n=a.length,s=getCoordsDataType(a.length),i=getCoordsDataType(a.length);let o="";if(1===n)o="coords * strides + begin";else{let t=0;o=a.map((r,n)=>(t++,1===a.length?`coords * strides[${n}] + begin[${n}]`:`coords[${t-1}] * strides[${n}] + begin[${n}]`)).join(",")}this.userCode=` ${s} begin = ${s}(${t}); ${s} strides = ${s}(${r}); void main() { ${i} coords = getOutputCoords(); setOutput(getX(${o})); } `}};function kernels_StridedSlice_stridedSlice(t){let 
// Tail of kernels_StridedSlice_stridedSlice — the `function ... {let` opener sits at
// the end of the previous line. Dispatch: identity -> reshape only; sliceDim0/simple
// slice -> Slice + reshape; CPU-resident input -> pH on synced data (presumably the
// CPU stridedSlice impl — verify); otherwise a StridedSliceProgram on the GPU.
r,{inputs:a,backend:n,attrs:s}=t,{x:i}=a,{begin:o,end:l,strides:u,beginMask:p,endMask:m,ellipsisMask:y,newAxisMask:_,shrinkAxisMask:w}=s,
{finalShapeSparse:I,finalShape:C,isIdentity:E,sliceDim0:A,isSimpleSlice:$,begin:F,end:D,strides:P}=sliceInfo(i.shape,o,l,u,p,m,y,_,w);
if(E)r=kernels_Reshape_reshape({inputs:{x:i},backend:n,attrs:{shape:C}});
else if(A||$){assert(i.shape.length>=1,()=>`Input must have rank at least 1, got: ${i.shape.length}`);let t=computeOutShape(F,D,P),a=kernels_Slice_slice({inputs:{x:i},backend:n,attrs:{begin:F,size:t}});r=kernels_Reshape_reshape({inputs:{x:a},backend:n,attrs:{shape:C}}),n.disposeIntermediateTensorInfo(a)}
else if(n.shouldExecuteOnCPU([i])){let t=n.readSync(i.dataId),a=pH(I,buffer(i.shape,i.dtype,t),P,F);r=n.makeTensorInfo(C,i.dtype,a.values)}
else{let t=new StridedSliceProgram(F,P,I);r=n.runWebGLProgram(t,[i],i.dtype)}
// Final reshape to the dense output shape; the intermediate result is disposed.
let L=kernels_Reshape_reshape({inputs:{x:r},backend:n,attrs:{shape:C}});
return n.disposeIntermediateTensorInfo(r),L}
// StringNGrams: evaluated on the CPU (readSync) — pq produces the flat n-gram
// strings plus row splits, returned as a string vector and an int32 splits tensor.
function kernels_StringNGrams_stringNGrams(t){let{inputs:r,backend:a,attrs:n}=t,{separator:s,nGramWidths:i,leftPad:o,rightPad:l,padWidth:u,preserveShortSequences:p}=n,{data:m,dataSplits:y}=r,[_,w]=pq(a.readSync(m.dataId),a.readSync(y.dataId),s,i,o,l,u,p);return[a.makeTensorInfo([_.length],"string",_),a.makeTensorInfo(y.shape,"int32",w)]}
// StringSplit: validates dtype and ranks, then delegates to pX on synced data;
// returns sparse output as (indices [m,2] int32, values [m] string, shape [2] int32).
function kernels_StringSplit_stringSplit(t){let{inputs:r,backend:a,attrs:n}=t,{skipEmpty:s}=n,{input:i,delimiter:o}=r;if("string"!==i.dtype)throw Error("Input must be of datatype string");if(1!==i.shape.length)throw Error(`Input must be a vector, got shape: ${i.shape}`);if(0!==o.shape.length)throw Error(`Delimiter must be a scalar, got shape: ${o.shape}`);let[l,u,p]=pX(a.readSync(i.dataId),a.readSync(o.dataId)[0],s),m=u.length;return[a.makeTensorInfo([m,2],"int32",l),a.makeTensorInfo([m],"string",u),a.makeTensorInfo([2],"int32",new Int32Array(p))]}
function
// StringToHashBucketFast — tail of the `function` keyword ending the previous line.
// CPU-evaluated: pY hashes the synced string data into `numBuckets` buckets.
kernels_StringToHashBucketFast_stringToHashBucketFast(t){let{inputs:r,backend:a,attrs:n}=t,{numBuckets:s}=n,{input:i}=r;if("string"!==i.dtype)throw Error("Input must be of datatype string");if(s<=0)throw Error("Number of buckets must be at least 1");let o=pY(a.readSync(i.dataId),s);return a.makeTensorInfo(i.shape,"int32",o)}
// Tan and Tanh unary kernels; tanh is computed via exp(-2.0 * abs(x)).
let cg=kernel_funcs_utils_unaryKernelFunc({opSnippet:"return tan(x);"}),cy=kernel_funcs_utils_unaryKernelFunc({opSnippet:` float e2x = exp(-2.0 * abs(x)); return sign(x) * (1.0 - e2x) / (1.0 + e2x); `});
// TensorScatterUpdate: flattens tensor/indices/updates to 2-D, runs ScatterProgram
// with trailing flags (!1, !0), reshapes back to the tensor's shape and disposes all
// intermediates. Note the empty destructure `{}=n` — no attrs are consumed.
function kernels_TensorScatterUpdate_tensorScatterUpdate(t){let{inputs:r,backend:a,attrs:n}=t,{tensor:s,indices:i,updates:o}=r,{}=n,{sliceRank:l,numUpdates:u,sliceSize:p,strides:m,outputSize:y}=calculateShapes(o,i,s.shape),_=[y/p,p];if(0===y)return a.makeTensorInfo(s.shape,i.dtype);let w=kernels_Reshape_reshape({inputs:{x:i},backend:a,attrs:{shape:[u,l]}}),I=kernels_Reshape_reshape({inputs:{x:o},backend:a,attrs:{shape:[u,p]}}),C=kernels_Reshape_reshape({inputs:{x:s},backend:a,attrs:{shape:_}}),E=new ScatterProgram(u,l,w.shape.length,I.shape.length,m,_,!1,!0),A=a.runWebGLProgram(E,[I,w,C],C.dtype),$=kernels_Reshape_reshape({inputs:{x:A},backend:a,attrs:{shape:s.shape}});return a.disposeIntermediateTensorInfo(w),a.disposeIntermediateTensorInfo(I),a.disposeIntermediateTensorInfo(C),a.disposeIntermediateTensorInfo(A),$}
// NOTE(review): the region below (TileProgram through the Tile kernel) appears to
// have been corrupted by markup stripping — e.g. `for(let n=0;n5)` is missing its
// loop condition/body and is not valid JavaScript. Left byte-for-byte; restore it
// from the original un-minified source rather than hand-patching here.
let TileProgram=class TileProgram{constructor(t,r){this.variableNames=["A"];const a=Array(t.length);for(let n=0;n5)throw Error(`Tile for rank ${r} is not yet supported`);if(1===r)return`imod(resRC, ${t[0]})`;let a=["resRC.x","resRC.y","resRC.z","resRC.w","resRC.u"],n=[];for(let r=0;r5){let t=a.readSync(s.dataId),r="string"===s.dtype?t.map(t=>decodeString(t)):t,n=pJ(buffer(s.shape,s.dtype,r),i);return a.makeTensorInfo(n.shape,n.dtype,n.values)}let o=new TileProgram(s.shape,i);return a.runWebGLProgram(o,[s],s.dtype)}
let SwapProgram=class
SwapProgram{constructor(t){this.variableNames=["x","indices"],this.customUniforms=[{name:"n",type:"int"},{name:"firstPass",type:"int"},{name:"negativeInf",type:"float"},{name:"dir",type:"int"},{name:"inc",type:"int"}],this.outputShape=t,this.userCode=` void main() { ivec2 coords = getOutputCoords(); int batch = coords[0]; int elemIdx = coords[1]; // We compare elements pair-wise within a group of size 2 * inc. // The comparing rule for each group alternates between ascending // and descending. Within each group, we compare each pair at // positions i and i+inc. To decide whether an element at position i // is x0 or x1, we mod it by 2 * inc, if the result is smaller than // inc, it is in the first half of the group, we denote it as x0, // otherwise we denote it as x1. // For example, as shown in the Bitonic top K paper referenced above, // Figure5(a) shows that element[1] is in the // second half of the group when group size is 2, but it is in the // first half of the group when group size is 4. bool isFirstInPair = imod(elemIdx, 2 * inc) < inc; int i = isFirstInPair ? elemIdx : elemIdx - inc; int i0 = firstPass == 1 ? i : int(getIndices(batch, i)); int i1 = firstPass == 1 ? i + inc : int(getIndices(batch, i + inc)); float x0 = i0 < n ? getX(batch, i0) : negativeInf; float x1 = i1 < n ? getX(batch, i1) : negativeInf; // Denotes which direction indices are in (ascending or descending). bool reverse = imod(elemIdx, 2 * dir) >= dir; bool isGreater = x0 > x1 || (x0 == x1 && i1 > i0); if (reverse == isGreater) { // Elements in opposite order of direction int iTemp = i0; i0 = i1; i1 = iTemp; } if (isFirstInPair) { setOutput(float(i0)); } else { setOutput(float(i1)); } } `}};let MergeProgram=class MergeProgram{constructor(t){this.variableNames=["x","indices"],this.customUniforms=[{name:"n",type:"int"},{name:"firstPass",type:"int"},{name:"k",type:"int"}],this.outputShape=t,this.userCode=` void main() { // Takes max of indices (0, k), (1, k + 1), (2, k + 2) ... 
ivec2 coords = getOutputCoords(); int batch = coords[0]; int elemIdx = coords[1]; // The output size is half of the previous size. // If the previous sequence is | | | | _ _ _ _ | | | | _ _ _ _ (k=4), // we only need to output the indices at positions |, the indices at // positions _ can be thrown away, see Figure5(b) After Phase 2 // (Merge phase) in the Bitonic Top K paper referenced above. // For example, the paper shows we only need to output the orange bars. // The output sequence should look like this | | | | | | | |. // Because the sequence is halved, to map the output index back // to the previous sequence to find the corresponding value, // we need to double the index. When we double the index, // we basically interpolate a position, so 2i looks like // | _ | _ | _ | _ | _ | _ | _. We move the | to the first k position // of each 2k positions by - elemIdx % k. E.g. for output at // index 4,5,6,7, we want to get the corresponding element at // original index 8,9,10,11, for output at index 8,9,10,11, // we want to get the corresponding element at original index // 16,17,18,19, so on and so forth. int i = elemIdx < k ? elemIdx : (elemIdx * 2 - imod(elemIdx, k)); int i0 = firstPass == 1 ? i : int(getIndices(batch, i)); int i1 = firstPass == 1 ? i + k : int(getIndices(batch, i + k)); float x0 = getX(batch, i0); float x1 = i1 < n ? getX(batch, i1) : x0; setOutput(x0 >= x1 ? 
float(i0) : float(i1)); } `}};function disposeIntermediateTensorInfoOrNull(t,r){null!==r&&t.disposeIntermediateTensorInfo(r)}function roundUpToPow2(t){let r=1;for(;ru){let[t,r]=pQ(a.readSync(s.dataId),p,s.dtype,i,o);return[a.makeTensorInfo(t.shape,t.dtype,t.values),a.makeTensorInfo(r.shape,r.dtype,r.values)]}if(0===i)return p[p.length-1]=0,[a.makeTensorInfo(p,s.dtype,[]),a.makeTensorInfo(p,"int32",[])];if(1===m)return[s,kernels_Fill_fill({attrs:{shape:p,dtype:"int32",value:0},backend:a})];let y=a.texData.get(s.dataId),_=null!==y&&y.isPacked,w=_?a.unpackTensor(s):s,I=sizeFromShape(p)/m,C=kernels_Reshape_reshape({inputs:{x:w},attrs:{shape:[I,m]},backend:a});_&&disposeIntermediateTensorInfoOrNull(a,w);let E=roundUpToPow2(i),A=roundUpToPow2(m),$=null,getInputs=()=>null===$?[C,C]:[C,$],runSwap=(t,r,n)=>{let s=getInputs(),i=new SwapProgram(n),o=[[m],[+(null===$)],[-1/0],[t],[r]],l=$;$=a.runWebGLProgram(i,s,"int32",o),disposeIntermediateTensorInfoOrNull(a,l)};for(let t=1;t=1;a/=2)runSwap(r,a,[I,A])}for(let t=A;t>E;t/=2){let r=getInputs(),n=new MergeProgram([I,t/2]),s=[[m],[+(null===$)],[E]],i=$;$=a.runWebGLProgram(n,r,"int32",s),disposeIntermediateTensorInfoOrNull(a,i);let o=E/2,l=2*o;for(let t=o;t>=1;t/=2)runSwap(l,t,$.shape)}let F=$;$=kernels_Slice_slice({inputs:{x:$},backend:a,attrs:{begin:0,size:[I,i]}}),disposeIntermediateTensorInfoOrNull(a,F);let D=GatherV2_gatherV2({inputs:{x:C,indices:$},backend:a,attrs:{axis:1,batchDims:1}});disposeIntermediateTensorInfoOrNull(a,C);let P=p.slice(0,-1);P.push(i),F=$,$=kernels_Reshape_reshape({inputs:{x:$},attrs:{shape:P},backend:a}),disposeIntermediateTensorInfoOrNull(a,F);let L=D;return D=kernels_Reshape_reshape({inputs:{x:D},attrs:{shape:P},backend:a}),disposeIntermediateTensorInfoOrNull(a,L),[D,$]}let TransformProgram=class TransformProgram{constructor(t,r,a,n,s,i){let 
o;switch(this.variableNames=["Image","Transforms"],this.outputShape=i,n){case"constant":default:o=1;break;case"reflect":o=2;break;case"wrap":o=3;break;case"nearest":o=4}this.userCode=` float mapCoord(float outCoord, float len) { float inCoord = outCoord; if(${o} == 2) { if (inCoord < 0.0) { if (len <= 1.0) { inCoord = 0.0; } else { float sz2 = 2.0 * len; if (inCoord < sz2) { inCoord = sz2 * float(int(float(-inCoord / sz2))) + inCoord; } inCoord = inCoord < -len ? inCoord + sz2 : -inCoord - 1.0; } } else if (inCoord > len - 1.0) { if (len <= 1.0) { inCoord = 0.0; } else { float sz2 = 2.0 * len; inCoord -= sz2 * float(int(float(inCoord / sz2))); if (inCoord >= len) { inCoord = sz2 - inCoord - 1.0; } } } return clamp(inCoord, 0.0, len - 1.0); } else if (${o} == 3) { if (inCoord < 0.0) { if (len <= 1.0) { inCoord = 0.0; } else { float sz = len - 1.0; inCoord += len * (float(int(float(-inCoord / sz))) + 1.0); } } else if (inCoord > len - 1.0) { if (len <= 1.0) { inCoord = 0.0; } else { float sz = len - 1.0; inCoord -= len * float(int(float(inCoord / sz))); } } return clamp(inCoord, 0.0, len - 1.0); } else if (${o} == 4) { return clamp(outCoord, 0.0, len - 1.0); } else { return outCoord; } } float readWithFillValue(int batch, int coordY, int coordX, int channel) { float outputValue; if (0 <= coordY && coordY < ${t} && 0 <= coordX && coordX < ${r}) { outputValue = getImage(batch, coordY, coordX, channel); } else { outputValue = float(${s}); } return outputValue; } void main() { ivec4 coords = getOutputCoords(); float outputValue; int batch = coords[0]; int x = coords[2]; int y = coords[1]; int channel = coords[3]; float xf = float(x); float yf = float(y); float a1 = getTransforms(batch, 0); float a2 = getTransforms(batch, 1); float a3 = getTransforms(batch, 2); float b1 = getTransforms(batch, 3); float b2 = getTransforms(batch, 4); float b3 = getTransforms(batch, 5); float c1 = getTransforms(batch, 6); float c2 = getTransforms(batch, 7); float projection = c1 * xf + c2 * 
yf + 1.0; if (projection == 0.0) { outputValue = float(${s}); } else { float inX = (a1 * xf + a2 * yf + a3) / projection; float inY = (b1 * xf + b2 * yf + b3) / projection; float mapX = mapCoord(inX, float(${r})); float mapY = mapCoord(inY, float(${t})); if (${"nearest"===a?1:2} == 1) { int coordY = int(round(mapY)); int coordX = int(round(mapX)); outputValue = readWithFillValue(batch, coordY, coordX, channel); } else { float yFloor = floor(mapY); float xFloor = floor(mapX); float yCeil = yFloor + 1.0; float xCeil = xFloor + 1.0; float valueYFloor = (xCeil - mapX) * readWithFillValue(batch, int(yFloor), int(xFloor), channel) + (mapX - xFloor) * readWithFillValue(batch, int(yFloor), int(xCeil), channel); float valueYCeil = (xCeil - mapX) * readWithFillValue(batch, int(yCeil), int(xFloor), channel) + (mapX - xFloor) * readWithFillValue(batch, int(yCeil), int(xCeil), channel); outputValue = (yCeil - mapY) * valueYFloor + (mapY - yFloor) * valueYCeil; } } setOutput(outputValue); } `}};function kernels_Transform_transform(t){let{inputs:r,backend:a,attrs:n}=t,{image:s,transforms:i}=r,{interpolation:o,fillMode:l,fillValue:u,outputShape:p}=n,[m,y,_,w]=s.shape,[I,C]=null!=p?p:[y,_],E=new TransformProgram(y,_,o,l,u,[m,I,C,w]);return a.runWebGLProgram(E,[s,i],"float32")}function kernels_Unique_unique(t){let{inputs:r,attrs:a,backend:n}=t,{axis:s}=a,{x:i}=r;webgl_util_assertNotComplex(i,"unique"),console.warn("WARNING: ","UI might be locked temporarily as data is being downloaded");let{outputValues:o,outputShape:l,indices:u}=p1(n.readSync(i.dataId),s,i.shape,i.dtype);return[n.makeTensorInfo(l,i.dtype,o),n.makeTensorInfo([u.length],"int32",u)]}function Unpack_unpack(t){let{inputs:r,backend:a,attrs:n}=t,{value:s}=r,{axis:i}=n;i<0&&(i+=s.shape.length);let o=s.shape.length,l=s.shape[i],u=Array(o-1),p=0;for(let t=0;ta.disposeIntermediateTensorInfo(t)),w}let SegmentOpProgram=class SegmentOpProgram{constructor(t,r){this.variableNames=["x","segmentIds"];const 
a=t.windowSize,n=t.batchSize,s=t.inSize,i=t.numSegments,o=i*Math.ceil(s/a);this.outputShape=[n,o];const l=4*Math.floor(a/4),u=a%4,p=` sumValue += dot(values, segFilter); `;let m="";s%a>0&&(m=` if (inIdx < 0 || inIdx >= ${s}) { return initializationValue; } `);let y="";s%a>0&&(y=` if (inIdx < 0 || inIdx >= ${s}) { return -1.0; } `),this.userCode=` const float initializationValue = 0.0; float getValue(int batch, int inIdx) { ${m} return getX(batch, inIdx); } float getSegmentIdAtIndex(int inIdx) { ${y} return getSegmentIds(inIdx); } void main() { ivec2 coords = getOutputCoords(); int batch = coords[0]; int outIdx = coords[1]; int inOffset = int(floor(float(outIdx) / float( ${i})) * float(${a})); int currentSeg = int(mod(float(outIdx), float(${i}))); float sumValue = 0.0; for (int i = 0; i < ${l}; i += 4) { int inIdx = inOffset + i; vec4 values = vec4( getValue(batch, inIdx), getValue(batch, inIdx + 1), getValue(batch, inIdx + 2), getValue(batch, inIdx + 3) ); vec4 segFilter = vec4( int(getSegmentIdAtIndex(inIdx)) == currentSeg ? 1 : 0, int(getSegmentIdAtIndex(inIdx + 1)) == currentSeg ? 1 : 0, int(getSegmentIdAtIndex(inIdx + 2)) == currentSeg ? 1 : 0, int(getSegmentIdAtIndex(inIdx + 3)) == currentSeg ? 1 : 0 ); ${p} } int inIdx = inOffset + ${l}; if (${1===u}) { vec4 values = vec4( getValue(batch, inIdx), initializationValue, initializationValue, initializationValue ); int inIdxSeg = int(getSegmentIdAtIndex(inIdx)); vec4 segFilter = vec4( int(getSegmentIdAtIndex(inIdx)) == currentSeg ? 1 : 0, 0, 0, 0 ); ${p} } else if (${2===u}) { vec4 values = vec4( getValue(batch, inIdx), getValue(batch, inIdx + 1), initializationValue, initializationValue ); vec4 segFilter = vec4( int(getSegmentIdAtIndex(inIdx)) == currentSeg ? 1 : 0, int(getSegmentIdAtIndex(inIdx + 1)) == currentSeg ? 
1 : 0, 0, 0 ); ${p} } else if (${3===u}) { vec4 values = vec4( getValue(batch, inIdx), getValue(batch, inIdx + 1), getValue(batch, inIdx + 2), initializationValue ); vec4 segFilter = vec4( int(getSegmentIdAtIndex(inIdx)) == currentSeg ? 1 : 0, int(getSegmentIdAtIndex(inIdx + 1)) == currentSeg ? 1 : 0, int(getSegmentIdAtIndex(inIdx + 2)) == currentSeg ? 1 : 0, 0 ); ${p} } setOutput(sumValue); } `}};function kernels_UnsortedSegmentSum_unsortedSegmentSum(t){let{inputs:r,backend:a,attrs:n}=t,{x:s,segmentIds:i}=r,{numSegments:o}=n,l=s.shape.length,u=[],p=0,m=getAxesPermutation([0],l),y=s;null!=m&&(y=kernels_Transpose_transpose({inputs:{x:s},backend:a,attrs:{perm:m}}),u.push(y),p=getInnerMostAxes(1,l)[0]);let _=segment_util_computeOutShape(y.shape,p,o),w=sizeFromShape([y.shape[p]]),I=kernels_Reshape_reshape({inputs:{x:y},backend:a,attrs:{shape:[-1,w]}});u.push(I);let C=sumOutType(s.dtype),segOpCompute=(t,r,n,s,i)=>{let o=t.shape[0],l=t.shape[1],p=segOpComputeOptimalWindowSize(l,i),m=new SegmentOpProgram({windowSize:p,inSize:l,batchSize:o,numSegments:i},r),y=a.compileAndRun(m,[t,n],s);if(u.push(y),y.shape[1]===i)return y;let _=kernels_Range_range({backend:a,attrs:{start:0,stop:i,step:1,dtype:"float32"}}),w=kernels_Tile_tile({inputs:{x:_},backend:a,attrs:{reps:[l/p]}});return u.push(_),u.push(w),segOpCompute(y,r,w,s,i)},E=kernels_Reshape_reshape({inputs:{x:segOpCompute(I,"unsortedSegmentSum",i,C,o)},backend:a,attrs:{shape:_}}),A=E;return null!=m&&(u.push(E),A=kernels_Transpose_transpose({inputs:{x:A},backend:a,attrs:{perm:getUndoAxesPermutation(m)}})),u.forEach(t=>a.disposeIntermediateTensorInfo(t)),A}for(let t 
of[{kernelName:an,backendName:"webgl",kernelFunc:_FusedMatMul_fusedMatMul},{kernelName:"Abs",backendName:"webgl",kernelFunc:kernels_Abs_abs},{kernelName:eB,backendName:"webgl",kernelFunc:hc},{kernelName:eW,backendName:"webgl",kernelFunc:hd},{kernelName:"Add",backendName:"webgl",kernelFunc:hf},{kernelName:eU,backendName:"webgl",kernelFunc:kernels_AddN_addN},{kernelName:"All",backendName:"webgl",kernelFunc:kernels_All_all},{kernelName:"Any",backendName:"webgl",kernelFunc:kernels_Any_any},{kernelName:eG,backendName:"webgl",kernelFunc:kernels_ArgMax_argMax},{kernelName:ej,backendName:"webgl",kernelFunc:kernels_ArgMin_argMin},{kernelName:eK,backendName:"webgl",kernelFunc:hg},{kernelName:eH,backendName:"webgl",kernelFunc:hy},{kernelName:eq,backendName:"webgl",kernelFunc:hb},{kernelName:eY,backendName:"webgl",kernelFunc:hx},{kernelName:eX,backendName:"webgl",kernelFunc:hv},{kernelName:eZ,backendName:"webgl",kernelFunc:kernels_AvgPool_avgPool},{kernelName:eQ,backendName:"webgl",kernelFunc:AvgPool3D_avgPool3D},{kernelName:e0,backendName:"webgl",kernelFunc:AvgPool3DGrad_avgPool3DGrad},{kernelName:eJ,backendName:"webgl",kernelFunc:kernels_AvgPoolGrad_avgPoolGrad},{kernelName:e1,backendName:"webgl",kernelFunc:BatchMatMul_batchMatMul},{kernelName:tD,backendName:"webgl",kernelFunc:({inputs:t,backend:r,attrs:a})=>{let{x:n,mean:s,variance:i,offset:o,scale:l}=t;assert(s.shape.length===i.shape.length,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),assert(null==o||s.shape.length===o.shape.length,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),assert(null==l||s.shape.length===l.shape.length,()=>"Batch normalization gradient requires mean and scale to have equal ranks.");let{varianceEpsilon:u}=a;null==u&&(u=.001);let p=[n,s,i],m=null;null!=o&&(m=o.shape,p.push(o));let y=null;null!=l&&(y=l.shape,p.push(l));let _=eV.getBool("WEBGL_PACK_NORMALIZATION")?new BatchNormPackedProgram(n.shape,s.shape,i.shape,m,y,u):new 
BatchNormProgram(n.shape,s.shape,i.shape,m,y,u);return r.runWebGLProgram(_,p,p[0].dtype)}},{kernelName:e2,backendName:"webgl",kernelFunc:t=>{let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{blockShape:i,crops:o}=n;assert(s.shape.length<=4,()=>"batchToSpaceND for rank > 4 with a WebGL backend not implemented yet");let l=i.reduce((t,r)=>t*r),u=getReshaped(s.shape,i,l),p=getPermuted(u.length,i.length),m=getReshapedPermuted(s.shape,i,l),y=getSliceBeginCoords(o,i.length),_=getSliceSize(m,o,i.length),w=[],I=kernels_Reshape_reshape({inputs:{x:s},backend:a,attrs:{shape:u}}),C=kernels_Transpose_transpose({inputs:{x:I},backend:a,attrs:{perm:p}}),E=kernels_Reshape_reshape({inputs:{x:C},backend:a,attrs:{shape:m}}),A=kernels_Slice_slice({inputs:{x:E},backend:a,attrs:{begin:y,size:_}});return w.push(I),w.push(C),w.push(E),w.forEach(t=>a.disposeIntermediateTensorInfo(t)),A}},{kernelName:e3,backendName:"webgl",kernelFunc:kernels_Bincount_bincount},{kernelName:e4,backendName:"webgl",kernelFunc:kernels_BitwiseAnd_bitwiseAnd},{kernelName:e6,backendName:"webgl",kernelFunc:kernels_BroadcastArgs_broadcastArgs},{kernelName:e5,backendName:"webgl",kernelFunc:kernels_Cast_cast},{kernelName:e8,backendName:"webgl",kernelFunc:hI},{kernelName:e7,backendName:"webgl",kernelFunc:kernels_ClipByValue_clipByValue},{kernelName:e9,backendName:"webgl",kernelFunc:kernels_Complex_complex},{kernelName:te,backendName:"webgl",kernelFunc:ComplexAbs_complexAbs},{kernelName:tt,backendName:"webgl",kernelFunc:kernels_Concat_concat},{kernelName:tr,backendName:"webgl",kernelFunc:Conv2D_conv2d},{kernelName:tn,backendName:"webgl",kernelFunc:kernels_Conv2DBackpropFilter_conv2DBackpropFilter},{kernelName:ts,backendName:"webgl",kernelFunc:kernels_Conv2DBackpropInput_conv2DBackpropInput},{kernelName:ti,backendName:"webgl",kernelFunc:Conv3D_conv3D},{kernelName:to,backendName:"webgl",kernelFunc:Conv3DBackpropFilterV2_conv3DBackpropFilterV2},{kernelName:tl,backendName:"webgl",kernelFunc:Conv3DBackpropInputV2_conv3DBackpropInput},{
kernelName:"Cos",backendName:"webgl",kernelFunc:hN},{kernelName:tu,backendName:"webgl",kernelFunc:hC},{kernelName:tc,backendName:"webgl",kernelFunc:t=>{let{inputs:r,backend:a,attrs:n}=t,{image:s,boxes:i,boxInd:o}=r,{cropSize:l,method:u,extrapolationValue:p}=n,m=new CropAndResizeProgram(s.shape,i.shape,l,u,p);return a.runWebGLProgram(m,[s,i,o],"float32")}},{kernelName:tp,backendName:"webgl",kernelFunc:kernels_Cumprod_cumprod},{kernelName:th,backendName:"webgl",kernelFunc:kernels_Cumsum_cumsum},{kernelName:td,backendName:"webgl",kernelFunc:kernels_DenseBincount_denseBincount},{kernelName:tm,backendName:"webgl",kernelFunc:kernels_DepthToSpace_depthToSpace},{kernelName:tf,backendName:"webgl",kernelFunc:DepthwiseConv2dNative_depthwiseConv2dNative},{kernelName:tg,backendName:"webgl",kernelFunc:kernels_DepthwiseConv2dNativeBackpropFilter_depthwiseConv2dNativeBackpropFilter},{kernelName:ty,backendName:"webgl",kernelFunc:kernels_DepthwiseConv2dNativeBackpropInput_depthwiseConv2dNativeBackpropInput},{kernelName:tx,backendName:"webgl",kernelFunc:kernels_Diag_diag},{kernelName:tv,backendName:"webgl",kernelFunc:dilation2D},{kernelName:tw,backendName:"webgl",kernelFunc:kernels_Einsum_einsum},{kernelName:"Elu",backendName:"webgl",kernelFunc:hE},{kernelName:tI,backendName:"webgl",kernelFunc:t=>{let{inputs:r,backend:a}=t,{dy:n,y:s}=r,i=eV.getBool("WEBGL_PACK_BINARY_OPERATIONS")?new BinaryOpPackedProgram(hA,n.shape,s.shape):new BinaryOpProgram("return (b >= 0.0) ? 
a : a * (b + 1.0);",n.shape,s.shape);return a.runWebGLProgram(i,[n,s],n.dtype)}},{kernelName:tN,backendName:"webgl",kernelFunc:h$},{kernelName:"Erf",backendName:"webgl",kernelFunc:hR},{kernelName:"Exp",backendName:"webgl",kernelFunc:hF},{kernelName:tC,backendName:"webgl",kernelFunc:kernels_ExpandDims_expandDims},{kernelName:tE,backendName:"webgl",kernelFunc:hP},{kernelName:"FFT",backendName:"webgl",kernelFunc:kernels_FFT_fft},{kernelName:tA,backendName:"webgl",kernelFunc:kernels_Fill_fill},{kernelName:t$,backendName:"webgl",kernelFunc:({inputs:t,backend:r})=>{let{image:a}=t,n=new FlipLeftRightProgram(a.shape);return r.runWebGLProgram(n,[a],a.dtype)}},{kernelName:tR,backendName:"webgl",kernelFunc:hM},{kernelName:tF,backendName:"webgl",kernelFunc:hL},{kernelName:at,backendName:"webgl",kernelFunc:FromPixels_fromPixels},{kernelName:as,backendName:"webgl",kernelFunc:fusedConv2d},{kernelName:ai,backendName:"webgl",kernelFunc:FusedDepthwiseConv2D_fusedDepthwiseConv2D},{kernelName:tO,backendName:"webgl",kernelFunc:GatherNd_gatherNd},{kernelName:tP,backendName:"webgl",kernelFunc:GatherV2_gatherV2},{kernelName:tM,backendName:"webgl",kernelFunc:hV},{kernelName:tL,backendName:"webgl",kernelFunc:hB},{kernelName:tz,backendName:"webgl",kernelFunc:kernels_Identity_identity},{kernelName:tV,backendName:"webgl",kernelFunc:kernels_IFFT_ifft},{kernelName:tB,backendName:"webgl",kernelFunc:kernels_Imag_imag},{kernelName:tW,backendName:"webgl",kernelFunc:hW},{kernelName:tU,backendName:"webgl",kernelFunc:hU},{kernelName:tG,backendName:"webgl",kernelFunc:hG},{kernelName:tj,backendName:"webgl",kernelFunc:kernels_LeakyRelu_leakyRelu},{kernelName:tK,backendName:"webgl",kernelFunc:hj},{kernelName:tH,backendName:"webgl",kernelFunc:hK},{kernelName:tq,backendName:"webgl",kernelFunc:LinSpace_linSpace},{kernelName:"Log",backendName:"webgl",kernelFunc:hH},{kernelName:tX,backendName:"webgl",kernelFunc:hq},{kernelName:tY,backendName:"webgl",kernelFunc:hX},{kernelName:tZ,backendName:"webgl",kernelFunc:hY
},{kernelName:tJ,backendName:"webgl",kernelFunc:hZ},{kernelName:"LRN",backendName:"webgl",kernelFunc:t=>{let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{depthRadius:i,bias:o,alpha:l,beta:u}=n,p=eV.getBool("WEBGL_PACK_NORMALIZATION")?new LRNPackedProgram(s.shape,i,o,l,u):new LRNProgram(s.shape,i,o,l,u);return a.runWebGLProgram(p,[s],s.dtype)}},{kernelName:tQ,backendName:"webgl",kernelFunc:t=>{let{inputs:r,backend:a,attrs:n}=t,{x:s,y:i,dy:o}=r,{depthRadius:l,bias:u,alpha:p,beta:m}=n,y=new LRNGradProgram(s.shape,l,u,p,m);return a.runWebGLProgram(y,[s,i,o],s.dtype)}},{kernelName:"Max",backendName:"webgl",kernelFunc:kernels_Max_max},{kernelName:t0,backendName:"webgl",kernelFunc:hJ},{kernelName:t1,backendName:"webgl",kernelFunc:kernels_MaxPool_maxPool},{kernelName:t3,backendName:"webgl",kernelFunc:MaxPool3D_maxPool3d},{kernelName:t4,backendName:"webgl",kernelFunc:MaxPool3DGrad_maxPool3DGrad},{kernelName:t2,backendName:"webgl",kernelFunc:kernels_MaxPoolGrad_maxPoolGrad},{kernelName:t6,backendName:"webgl",kernelFunc:({inputs:t,attrs:r,backend:a})=>{let{x:n}=t,{filterSize:s,strides:i,pad:o,includeBatchInIndex:l}=r;assert(4===n.shape.length,()=>`Error in maxPool: input must be rank 4 but got rank ${n.shape.length}.`);let u=[1,1];assert(eitherStridesOrDilationsAreOne(i,u),()=>`Error in maxPool: Either strides or dilations must be 1. 
Got strides ${i} and dilations '${u}'`);let p=computePool2DInfo(n.shape,s,i,u,o),[m,y]=MaxPoolWithArgmax_impl_maxPoolWithArgmaxImpl(n,l,p,a);return[m,y]}},{kernelName:t5,backendName:"webgl",kernelFunc:({inputs:t,attrs:r,backend:a})=>{let{x:n}=t,{keepDims:s,axis:i}=r,o=n.shape.length,l=parseAxisParam(i,n.shape),u=l,p=getAxesPermutation(u,o),m=null!=p,y=a.shouldExecuteOnCPU([n]),_=[],w=n;if(m){if(y){let t=a.texData.get(w.dataId).values,r=Array(o);for(let t=0;t{let{x:n}=t,{paddings:s,mode:i}=a,o=eV.getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new MirrorPadPackedProgram(n.shape,s,i):new MirrorPadProgram(n.shape,s,i);return r.runWebGLProgram(o,[n],n.dtype)}},{kernelName:"Mod",backendName:"webgl",kernelFunc:h0},{kernelName:t9,backendName:"webgl",kernelFunc:kernels_Multinomial_multinomial},{kernelName:re,backendName:"webgl",kernelFunc:kernels_Multiply_multiply},{kernelName:"Neg",backendName:"webgl",kernelFunc:kernels_Neg_neg},{kernelName:rr,backendName:"webgl",kernelFunc:NonMaxSuppressionV3_nonMaxSuppressionV3},{kernelName:rn,backendName:"webgl",kernelFunc:NonMaxSuppressionV4_nonMaxSuppressionV4},{kernelName:rs,backendName:"webgl",kernelFunc:NonMaxSuppressionV5_nonMaxSuppressionV5},{kernelName:rt,backendName:"webgl",kernelFunc:hS},{kernelName:ro,backendName:"webgl",kernelFunc:t=>{let{inputs:r,backend:a,attrs:n}=t,{indices:s}=r,{dtype:i,depth:o,onValue:l,offValue:u}=n,p=sizeFromShape(s.shape),m=new OneHotProgram(p,o,l,u),y=kernels_Reshape_reshape({inputs:{x:s},backend:a,attrs:{shape:[p]}}),_=a.runWebGLProgram(m,[y],i);a.disposeIntermediateTensorInfo(y);let w=kernels_Reshape_reshape({inputs:{x:_},backend:a,attrs:{shape:[...s.shape,o]}});return 
a.disposeIntermediateTensorInfo(_),w}},{kernelName:ri,backendName:"webgl",kernelFunc:kernels_OnesLike_onesLike},{kernelName:rl,backendName:"webgl",kernelFunc:Pack_pack},{kernelName:ru,backendName:"webgl",kernelFunc:PadV2_padV2},{kernelName:"Pow",backendName:"webgl",kernelFunc:h9},{kernelName:rp,backendName:"webgl",kernelFunc:kernels_Prelu_prelu},{kernelName:rh,backendName:"webgl",kernelFunc:kernels_Prod_prod},{kernelName:rc,backendName:"webgl",kernelFunc:kernels_RaggedGather_raggedGather},{kernelName:rd,backendName:"webgl",kernelFunc:kernels_RaggedRange_raggedRange},{kernelName:rm,backendName:"webgl",kernelFunc:kernels_RaggedTensorToTensor_raggedTensorToTensor},{kernelName:rf,backendName:"webgl",kernelFunc:kernels_Range_range},{kernelName:rg,backendName:"webgl",kernelFunc:kernels_Real_real},{kernelName:tS,backendName:"webgl",kernelFunc:h1},{kernelName:ry,backendName:"webgl",kernelFunc:ce},{kernelName:rx,backendName:"webgl",kernelFunc:ct},{kernelName:rw,backendName:"webgl",kernelFunc:cr},{kernelName:rv,backendName:"webgl",kernelFunc:kernels_Reshape_reshape},{kernelName:rk,backendName:"webgl",kernelFunc:kernels_ResizeBilinear_resizeBilinear},{kernelName:rS,backendName:"webgl",kernelFunc:ResizeBilinearGrad_resizeBilinearGrad},{kernelName:r_,backendName:"webgl",kernelFunc:kernels_ResizeNearestNeighbor_resizeNearestNeighbor},{kernelName:rT,backendName:"webgl",kernelFunc:ResizeNearestNeighborGrad_resizeNearestNeighborGrad},{kernelName:rI,backendName:"webgl",kernelFunc:kernels_Reverse_reverse},{kernelName:ar,backendName:"webgl",kernelFunc:({inputs:t,attrs:r,backend:a})=>{let{image:n}=t,{radians:s,fillValue:i,center:o}=r,l=new RotateProgram(n.shape,i),[u,p]=getImageCenter(o,n.shape[1],n.shape[2]),m=[[u,p,Math.sin(s),Math.cos(s)]];return 
a.runWebGLProgram(l,[n],n.dtype,m)}},{kernelName:rN,backendName:"webgl",kernelFunc:ca},{kernelName:rC,backendName:"webgl",kernelFunc:cn},{kernelName:rE,backendName:"webgl",kernelFunc:ScatterNd_scatterNd},{kernelName:r$,backendName:"webgl",kernelFunc:kernels_SearchSorted_searchSorted},{kernelName:rR,backendName:"webgl",kernelFunc:kernels_Select_select},{kernelName:rF,backendName:"webgl",kernelFunc:cs},{kernelName:rM,backendName:"webgl",kernelFunc:ci},{kernelName:rO,backendName:"webgl",kernelFunc:co},{kernelName:"Sin",backendName:"webgl",kernelFunc:cl},{kernelName:rP,backendName:"webgl",kernelFunc:cu},{kernelName:rD,backendName:"webgl",kernelFunc:kernels_Slice_slice},{kernelName:rW,backendName:"webgl",kernelFunc:kernels_Softmax_softmax},{kernelName:rL,backendName:"webgl",kernelFunc:cp},{kernelName:rV,backendName:"webgl",kernelFunc:t=>{let{inputs:r,backend:a,attrs:n}=t,{x:s}=r,{blockShape:i,paddings:o}=n;assert(s.shape.length<=4,()=>"spaceToBatchND for rank > 4 with a WebGL backend not implemented yet");let l=i.reduce((t,r)=>t*r),u=[[0,0]];u.push(...o);for(let 
t=1+i.length;ta.disposeIntermediateTensorInfo(t)),E}},{kernelName:rU,backendName:"webgl",kernelFunc:kernels_SparseFillEmptyRows_sparseFillEmptyRows},{kernelName:rG,backendName:"webgl",kernelFunc:kernels_SparseReshape_sparseReshape},{kernelName:rj,backendName:"webgl",kernelFunc:kernels_SparseSegmentMean_sparseSegmentMean},{kernelName:rK,backendName:"webgl",kernelFunc:kernels_SparseSegmentSum_sparseSegmentSum},{kernelName:rH,backendName:"webgl",kernelFunc:kernels_SparseToDense_sparseToDense},{kernelName:rB,backendName:"webgl",kernelFunc:SplitV_splitV},{kernelName:rz,backendName:"webgl",kernelFunc:cc},{kernelName:rX,backendName:"webgl",kernelFunc:cd},{kernelName:rq,backendName:"webgl",kernelFunc:cf},{kernelName:rY,backendName:"webgl",kernelFunc:kernels_StaticRegexReplace_staticRegexReplace},{kernelName:ae,backendName:"webgl",kernelFunc:kernels_Step_step},{kernelName:rZ,backendName:"webgl",kernelFunc:kernels_StridedSlice_stridedSlice},{kernelName:rJ,backendName:"webgl",kernelFunc:kernels_StringNGrams_stringNGrams},{kernelName:rQ,backendName:"webgl",kernelFunc:kernels_StringSplit_stringSplit},{kernelName:r0,backendName:"webgl",kernelFunc:kernels_StringToHashBucketFast_stringToHashBucketFast},{kernelName:"Sub",backendName:"webgl",kernelFunc:h3},{kernelName:"Sum",backendName:"webgl",kernelFunc:kernels_Sum_sum},{kernelName:"Tan",backendName:"webgl",kernelFunc:cg},{kernelName:r1,backendName:"webgl",kernelFunc:cy},{kernelName:rA,backendName:"webgl",kernelFunc:kernels_TensorScatterUpdate_tensorScatterUpdate},{kernelName:r2,backendName:"webgl",kernelFunc:kernels_Tile_tile},{kernelName:r3,backendName:"webgl",kernelFunc:TopK_topK},{kernelName:r4,backendName:"webgl",kernelFunc:kernels_Transform_transform},{kernelName:r6,backendName:"webgl",kernelFunc:kernels_Transpose_transpose},{kernelName:r5,backendName:"webgl",kernelFunc:kernels_Unique_unique},{kernelName:r8,backendName:"webgl",kernelFunc:Unpack_unpack},{kernelName:r7,backendName:"webgl",kernelFunc:kernels_UnsortedSegmentSum_un
sortedSegmentSum},{kernelName:r9,backendName:"webgl",kernelFunc:kernels_ZerosLike_zerosLike}])registerKernel(t)},0x247afecc7:t=>{t.exports=Long;var r=null;try{r=new WebAssembly.Instance(new WebAssembly.Module(new Uint8Array([0,97,115,109,1,0,0,0,1,13,2,96,0,1,127,96,4,127,127,127,127,1,127,3,7,6,0,1,1,1,1,1,6,6,1,127,1,65,0,11,7,50,6,3,109,117,108,0,1,5,100,105,118,95,115,0,2,5,100,105,118,95,117,0,3,5,114,101,109,95,115,0,4,5,114,101,109,95,117,0,5,8,103,101,116,95,104,105,103,104,0,0,10,191,1,6,4,0,35,0,11,36,1,1,126,32,0,173,32,1,173,66,32,134,132,32,2,173,32,3,173,66,32,134,132,126,34,4,66,32,135,167,36,0,32,4,167,11,36,1,1,126,32,0,173,32,1,173,66,32,134,132,32,2,173,32,3,173,66,32,134,132,127,34,4,66,32,135,167,36,0,32,4,167,11,36,1,1,126,32,0,173,32,1,173,66,32,134,132,32,2,173,32,3,173,66,32,134,132,128,34,4,66,32,135,167,36,0,32,4,167,11,36,1,1,126,32,0,173,32,1,173,66,32,134,132,32,2,173,32,3,173,66,32,134,132,129,34,4,66,32,135,167,36,0,32,4,167,11,36,1,1,126,32,0,173,32,1,173,66,32,134,132,32,2,173,32,3,173,66,32,134,132,130,34,4,66,32,135,167,36,0,32,4,167,11])),{}).exports}catch{}function Long(t,r,a){this.low=0|t,this.high=0|r,this.unsigned=!!a}function isLong(t){return!0===(t&&t.__isLong__)}Long.prototype.__isLong__,Object.defineProperty(Long.prototype,"__isLong__",{value:!0}),Long.isLong=isLong;var a={},n={};function fromInt(t,r){var s,i,o;if(r)return(t>>>=0,(o=0<=t&&t<256)&&(i=n[t]))?i:(s=fromBits(t,(0|t)<0?-1:0,!0),o&&(n[t]=s),s);return(t|=0,(o=-128<=t&&t<128)&&(i=a[t]))?i:(s=fromBits(t,t<0?-1:0,!1),o&&(a[t]=s),s)}function fromNumber(t,r){if(isNaN(t))return r?m:p;if(r){if(t<0)return m;if(t>=o)return C}else{if(t<=-l)return E;if(t+1>=l)return I}return t<0?fromNumber(-t,r).neg():fromBits(t%i|0,t/i|0,r)}function fromBits(t,r,a){return new Long(t,r,a)}Long.fromInt=fromInt,Long.fromNumber=fromNumber,Long.fromBits=fromBits;var s=Math.pow;function fromString(t,r,a){if(0===t.length)throw Error("empty 
string");if("NaN"===t||"Infinity"===t||"+Infinity"===t||"-Infinity"===t)return p;if("number"==typeof r?(a=r,r=!1):r=!!r,(a=a||10)<2||360)throw Error("interior hyphen");if(0===n)return fromString(t.substring(1),r,a).neg();for(var n,i=fromNumber(s(a,8)),o=p,l=0;l>>0:this.low},A.toNumber=function(){return this.unsigned?(this.high>>>0)*i+(this.low>>>0):this.high*i+(this.low>>>0)},A.toString=function(t){if((t=t||10)<2||36>>0).toString(t);if((o=u).isZero())return p+l;for(;p.length<6;)p="0"+p;l=""+p+l}},A.getHighBits=function(){return this.high},A.getHighBitsUnsigned=function(){return this.high>>>0},A.getLowBits=function(){return this.low},A.getLowBitsUnsigned=function(){return this.low>>>0},A.getNumBitsAbs=function(){if(this.isNegative())return this.eq(E)?64:this.neg().getNumBitsAbs();for(var t=0!=this.high?this.high:this.low,r=31;r>0&&(t&1<=0},A.isOdd=function(){return(1&this.low)==1},A.isEven=function(){return(1&this.low)==0},A.equals=function(t){return isLong(t)||(t=fromValue(t)),(this.unsigned===t.unsigned||this.high>>>31!=1||t.high>>>31!=1)&&this.high===t.high&&this.low===t.low},A.eq=A.equals,A.notEquals=function(t){return!this.eq(t)},A.neq=A.notEquals,A.ne=A.notEquals,A.lessThan=function(t){return 0>this.comp(t)},A.lt=A.lessThan,A.lessThanOrEqual=function(t){return 0>=this.comp(t)},A.lte=A.lessThanOrEqual,A.le=A.lessThanOrEqual,A.greaterThan=function(t){return this.comp(t)>0},A.gt=A.greaterThan,A.greaterThanOrEqual=function(t){return this.comp(t)>=0},A.gte=A.greaterThanOrEqual,A.ge=A.greaterThanOrEqual,A.compare=function(t){if(isLong(t)||(t=fromValue(t)),this.eq(t))return 0;var r=this.isNegative(),a=t.isNegative();return r&&!a?-1:!r&&a?1:this.unsigned?t.high>>>0>this.high>>>0||t.high===this.high&&t.low>>>0>this.low>>>0?-1:1:this.sub(t).isNegative()?-1:1},A.comp=A.compare,A.negate=function(){return!this.unsigned&&this.eq(E)?E:this.not().add(y)},A.neg=A.negate,A.add=function(t){isLong(t)||(t=fromValue(t));var 
r,a,n=this.high>>>16,s=65535&this.high,i=this.low>>>16,o=65535&this.low,l=t.high>>>16,u=65535&t.high,p=t.low>>>16,m=65535&t.low,y=0,_=0;return r=0+((a=0+(o+m))>>>16),a&=65535,r+=i+p,_+=r>>>16,r&=65535,_+=s+u,y+=_>>>16,_&=65535,y+=n+l,fromBits(r<<16|a,(y&=65535)<<16|_,this.unsigned)},A.subtract=function(t){return isLong(t)||(t=fromValue(t)),this.add(t.neg())},A.sub=A.subtract,A.multiply=function(t){if(this.isZero())return p;if(isLong(t)||(t=fromValue(t)),r)return fromBits(r.mul(this.low,this.high,t.low,t.high),r.get_high(),this.unsigned);if(t.isZero())return p;if(this.eq(E))return t.isOdd()?E:p;if(t.eq(E))return this.isOdd()?E:p;if(this.isNegative())if(t.isNegative())return this.neg().mul(t.neg());else return this.neg().mul(t).neg();if(t.isNegative())return this.mul(t.neg()).neg();if(this.lt(u)&&t.lt(u))return fromNumber(this.toNumber()*t.toNumber(),this.unsigned);var a,n,s=this.high>>>16,i=65535&this.high,o=this.low>>>16,l=65535&this.low,m=t.high>>>16,y=65535&t.high,_=t.low>>>16,w=65535&t.low,I=0,C=0;return a=0+((n=0+l*w)>>>16),n&=65535,a+=o*w,C+=a>>>16,a&=65535,a+=l*_,C+=a>>>16,a&=65535,C+=i*w,I+=C>>>16,C&=65535,C+=o*_,I+=C>>>16,C&=65535,C+=l*y,I+=C>>>16,C&=65535,I+=s*w+i*_+o*y+l*m,fromBits(a<<16|n,(I&=65535)<<16|C,this.unsigned)},A.mul=A.multiply,A.divide=function(t){if(isLong(t)||(t=fromValue(t)),t.isZero())throw Error("division by zero");if(r){var a,n,i;return this.unsigned||-0x80000000!==this.high||-1!==t.low||-1!==t.high?fromBits((this.unsigned?r.div_u:r.div_s)(this.low,this.high,t.low,t.high),r.get_high(),this.unsigned):this}if(this.isZero())return this.unsigned?m:p;if(this.unsigned){if(t.unsigned||(t=t.toUnsigned()),t.gt(this))return m;if(t.gt(this.shru(1)))return _;i=m}else{if(this.eq(E))if(t.eq(y)||t.eq(w))return E;else return t.eq(E)?y:(a=this.shr(1).div(t).shl(1)).eq(p)?t.isNegative()?y:w:(n=this.sub(t.mul(a)),i=a.add(n.div(t)));if(t.eq(E))return this.unsigned?m:p;if(this.isNegative())return 
t.isNegative()?this.neg().div(t.neg()):this.neg().div(t).neg();if(t.isNegative())return this.div(t.neg()).neg();i=p}for(n=this;n.gte(t);){for(var o=Math.ceil(Math.log(a=Math.max(1,Math.floor(n.toNumber()/t.toNumber())))/Math.LN2),l=o<=48?1:s(2,o-48),u=fromNumber(a),I=u.mul(t);I.isNegative()||I.gt(n);)a-=l,I=(u=fromNumber(a,this.unsigned)).mul(t);u.isZero()&&(u=y),i=i.add(u),n=n.sub(I)}return i},A.div=A.divide,A.modulo=function(t){return(isLong(t)||(t=fromValue(t)),r)?fromBits((this.unsigned?r.rem_u:r.rem_s)(this.low,this.high,t.low,t.high),r.get_high(),this.unsigned):this.sub(this.div(t).mul(t))},A.mod=A.modulo,A.rem=A.modulo,A.not=function(){return fromBits(~this.low,~this.high,this.unsigned)},A.and=function(t){return isLong(t)||(t=fromValue(t)),fromBits(this.low&t.low,this.high&t.high,this.unsigned)},A.or=function(t){return isLong(t)||(t=fromValue(t)),fromBits(this.low|t.low,this.high|t.high,this.unsigned)},A.xor=function(t){return isLong(t)||(t=fromValue(t)),fromBits(this.low^t.low,this.high^t.high,this.unsigned)},A.shiftLeft=function(t){return(isLong(t)&&(t=t.toInt()),0==(t&=63))?this:t<32?fromBits(this.low<>>32-t,this.unsigned):fromBits(0,this.low<>>t|this.high<<32-t,this.high>>t,this.unsigned):fromBits(this.high>>t-32,this.high>=0?0:-1,this.unsigned)},A.shr=A.shiftRight,A.shiftRightUnsigned=function(t){if(isLong(t)&&(t=t.toInt()),0==(t&=63))return this;var r=this.high;return t<32?fromBits(this.low>>>t|r<<32-t,r>>>t,this.unsigned):32===t?fromBits(r,0,this.unsigned):fromBits(r>>>t-32,0,this.unsigned)},A.shru=A.shiftRightUnsigned,A.shr_u=A.shiftRightUnsigned,A.toSigned=function(){return this.unsigned?fromBits(this.low,this.high,!1):this},A.toUnsigned=function(){return this.unsigned?this:fromBits(this.low,this.high,!0)},A.toBytes=function(t){return t?this.toBytesLE():this.toBytesBE()},A.toBytesLE=function(){var t=this.high,r=this.low;return[255&r,r>>>8&255,r>>>16&255,r>>>24,255&t,t>>>8&255,t>>>16&255,t>>>24]},A.toBytesBE=function(){var 
t=this.high,r=this.low;return[t>>>24,t>>>16&255,t>>>8&255,255&t,r>>>24,r>>>16&255,r>>>8&255,255&r]},Long.fromBytes=function(t,r,a){return a?Long.fromBytesLE(t,r):Long.fromBytesBE(t,r)},Long.fromBytesLE=function(t,r){return new Long(t[0]|t[1]<<8|t[2]<<16|t[3]<<24,t[4]|t[5]<<8|t[6]<<16|t[7]<<24,r)},Long.fromBytesBE=function(t,r){return new Long(t[4]<<24|t[5]<<16|t[6]<<8|t[7],t[0]<<24|t[1]<<16|t[2]<<8|t[3],r)}},0x1c3f085b2:(t,r,a)=>{var n=a(0x1a92d1605),s=a(0xe34412b4),i=a(0x18171aa),o=a(0x1f6cf0f7e),l=a(0x18a478568),u=a(0x1b9e70b2c),p=a(0x1591daa18);p.alea=n,p.xor128=s,p.xorwow=i,p.xorshift7=o,p.xor4096=l,p.tychei=u,t.exports=p},0x1a92d1605:function(t,r,a){var n;!function(t,s){function Alea(t){var r=this,a=Mash();r.next=function(){var t=2091639*r.s0+23283064365386963e-26*r.c;return r.s0=r.s1,r.s1=r.s2,r.s2=t-(r.c=0|t)},r.c=1,r.s0=a(" "),r.s1=a(" "),r.s2=a(" "),r.s0-=a(t),r.s0<0&&(r.s0+=1),r.s1-=a(t),r.s1<0&&(r.s1+=1),r.s2-=a(t),r.s2<0&&(r.s2+=1)}function copy(t,r){return r.c=t.c,r.s0=t.s0,r.s1=t.s1,r.s2=t.s2,r}function impl(t,r){var a=new Alea(t),n=r&&r.state,s=a.next;return s.int32=function(){return 0x100000000*a.next()|0},s.double=function(){return s()+(2097152*s()|0)*11102230246251565e-32},s.quick=s,n&&("object"==typeof n&©(n,a),s.state=function(){return copy(a,{})}),s}function Mash(){var t=0xefc8249d;return function(r){r=String(r);for(var a=0;a>>0,n-=t,n*=t,t=n>>>0,n-=t,t+=0x100000000*n}return(t>>>0)*23283064365386963e-26}}t&&t.exports?t.exports=impl:a.amdD&&a.amdO?void 0===(n=(function(){return impl}).call(r,a,r,t))||(t.exports=n):this.alea=impl}(t=a.nmd(t),a.amdD)},0x1b9e70b2c:function(t,r,a){var n;!function(t,s){function XorGen(t){var r=this,a="";r.next=function(){var t=r.b,a=r.c,n=r.d,s=r.a;return t=t<<25^t>>>7^a,a=a-n|0,n=n<<24^n>>>8^s,s=s-t|0,r.b=t=t<<20^t>>>12^a,r.c=a=a-n|0,r.d=n<<16^a>>>16^s,r.a=s-t|0},r.a=0,r.b=0,r.c=-0x61c88647,r.d=0x517cc1b7,t===Math.floor(t)?(r.a=t/0x100000000|0,r.b=0|t):a+=t;for(var n=0;n>>0)/0x100000000};return 
prng.double=function(){do var t=((a.next()>>>11)+(a.next()>>>0)/0x100000000)/2097152;while(0===t)return t},prng.int32=a.next,prng.quick=prng,n&&("object"==typeof n&©(n,a),prng.state=function(){return copy(a,{})}),prng}t&&t.exports?t.exports=impl:a.amdD&&a.amdO?void 0===(n=(function(){return impl}).call(r,a,r,t))||(t.exports=n):this.tychei=impl}(t=a.nmd(t),a.amdD)},0xe34412b4:function(t,r,a){var n;!function(t,s){function XorGen(t){var r=this,a="";r.x=0,r.y=0,r.z=0,r.w=0,r.next=function(){var t=r.x^r.x<<11;return r.x=r.y,r.y=r.z,r.z=r.w,r.w^=r.w>>>19^t^t>>>8},t===(0|t)?r.x=t:a+=t;for(var n=0;n>>0)/0x100000000};return prng.double=function(){do var t=((a.next()>>>11)+(a.next()>>>0)/0x100000000)/2097152;while(0===t)return t},prng.int32=a.next,prng.quick=prng,n&&("object"==typeof n&©(n,a),prng.state=function(){return copy(a,{})}),prng}t&&t.exports?t.exports=impl:a.amdD&&a.amdO?void 0===(n=(function(){return impl}).call(r,a,r,t))||(t.exports=n):this.xor128=impl}(t=a.nmd(t),a.amdD)},0x18a478568:function(t,r,a){var n;!function(t,s){function XorGen(t){var r=this;r.next=function(){var t,a,n=r.w,s=r.X,i=r.i;return r.w=n=n+0x61c88647|0,a=s[i+34&127],t=s[i=i+1&127],a^=a<<13,t^=t<<17,a^=a>>>15,t^=t>>>12,a=s[i]=a^t,r.i=i,a+(n^n>>>16)|0},function(t,r){var a,n,s,i,o,l=[],u=128;for(r===(0|r)?(n=r,r=null):(r+="\0",n=0,u=Math.max(u,r.length)),s=0,i=-32;i>>15,n^=n<<4,n^=n>>>13,i>=0&&(o=o+0x61c88647|0,s=0==(a=l[127&i]^=n+o)?s+1:0);for(s>=128&&(l[127&(r&&r.length||0)]=-1),s=127,i=512;i>0;--i)n=l[s+34&127],a=l[s=s+1&127],n^=n<<13,a^=a<<17,n^=n>>>15,a^=a>>>12,l[s]=n^a;t.w=o,t.X=l,t.i=s}(r,t)}function copy(t,r){return r.i=t.i,r.w=t.w,r.X=t.X.slice(),r}function impl(t,r){null==t&&(t=+new Date);var a=new XorGen(t),n=r&&r.state,prng=function(){return(a.next()>>>0)/0x100000000};return prng.double=function(){do var t=((a.next()>>>11)+(a.next()>>>0)/0x100000000)/2097152;while(0===t)return t},prng.int32=a.next,prng.quick=prng,n&&(n.X&©(n,a),prng.state=function(){return 
copy(a,{})}),prng}t&&t.exports?t.exports=impl:a.amdD&&a.amdO?void 0===(n=(function(){return impl}).call(r,a,r,t))||(t.exports=n):this.xor4096=impl}(t=a.nmd(t),a.amdD)},0x1f6cf0f7e:function(t,r,a){var n;!function(t,s){function XorGen(t){var r=this;r.next=function(){var t,a,n=r.x,s=r.i;return t=n[s],t^=t>>>7,a=t^t<<24^((t=n[s+1&7])^t>>>10)^((t=n[s+3&7])^t>>>3)^((t=n[s+4&7])^t<<7),t=n[s+7&7],t^=t<<13,a^=t^t<<9,n[s]=a,r.i=s+1&7,a},function(t,r){var a,n=[];if(r===(0|r))n[0]=r;else for(a=0,r=""+r;a0;--a)t.next()}(r,t)}function copy(t,r){return r.x=t.x.slice(),r.i=t.i,r}function impl(t,r){null==t&&(t=+new Date);var a=new XorGen(t),n=r&&r.state,prng=function(){return(a.next()>>>0)/0x100000000};return prng.double=function(){do var t=((a.next()>>>11)+(a.next()>>>0)/0x100000000)/2097152;while(0===t)return t},prng.int32=a.next,prng.quick=prng,n&&(n.x&©(n,a),prng.state=function(){return copy(a,{})}),prng}t&&t.exports?t.exports=impl:a.amdD&&a.amdO?void 0===(n=(function(){return impl}).call(r,a,r,t))||(t.exports=n):this.xorshift7=impl}(t=a.nmd(t),a.amdD)},0x18171aa:function(t,r,a){var n;!function(t,s){function XorGen(t){var r=this,a="";r.next=function(){var t=r.x^r.x>>>2;return r.x=r.y,r.y=r.z,r.z=r.w,r.w=r.v,(r.d=r.d+362437|0)+(r.v=r.v^r.v<<4^(t^t<<1))|0},r.x=0,r.y=0,r.z=0,r.w=0,r.v=0,t===(0|t)?r.x=t:a+=t;for(var n=0;n>>4),r.next()}function copy(t,r){return r.x=t.x,r.y=t.y,r.z=t.z,r.w=t.w,r.v=t.v,r.d=t.d,r}function impl(t,r){var a=new XorGen(t),n=r&&r.state,prng=function(){return(a.next()>>>0)/0x100000000};return prng.double=function(){do var t=((a.next()>>>11)+(a.next()>>>0)/0x100000000)/2097152;while(0===t)return t},prng.int32=a.next,prng.quick=prng,n&&("object"==typeof n&©(n,a),prng.state=function(){return copy(a,{})}),prng}t&&t.exports?t.exports=impl:a.amdD&&a.amdO?void 0===(n=(function(){return impl}).call(r,a,r,t))||(t.exports=n):this.xorwow=impl}(t=a.nmd(t),a.amdD)},0x1591daa18:function(t,r,a){var n;!function(s,i,o){var l,u=o.pow(256,6),p=o.pow(2,52),m=2*p;function 
seedrandom(t,r,a){var n=[],s=mixkey(flatten((r=!0==r?{entropy:!0}:r||{}).entropy?[t,tostring(i)]:null==t?autoseed():t,3),n),l=new ARC4(n),prng=function(){for(var t=l.g(6),r=u,a=0;t=m;)t/=2,r/=2,a>>>=1;return(t+a)/r};return prng.int32=function(){return 0|l.g(4)},prng.quick=function(){return l.g(4)/0x100000000},prng.double=prng,mixkey(tostring(l.S),i),(r.pass||a||function(t,r,a,n){return(n&&(n.S&©(n,l),t.state=function(){return copy(l,{})}),a)?(o.random=t,r):t})(prng,s,"global"in r?r.global:this==o,r.state)}function ARC4(t){var r,a=t.length,n=this,s=0,i=n.i=n.j=0,o=n.S=[];for(a||(t=[a++]);s<256;)o[s]=s++;for(s=0;s<256;s++)o[s]=o[i=255&i+t[s%a]+(r=o[s])],o[i]=r;(n.g=function(t){for(var r,a=0,s=n.i,i=n.j,o=n.S;t--;)r=o[s=255&s+1],a=256*a+o[255&(o[s]=o[i=255&i+r])+(o[i]=r)];return n.i=s,n.j=i,a})(256)}function copy(t,r){return r.i=t.i,r.j=t.j,r.S=t.S.slice(),r}function flatten(t,r){var a,n=[],s=typeof t;if(r&&"object"==s)for(a in t)try{n.push(flatten(t[a],r-1))}catch{}return n.length?n:"string"==s?t:t+"\0"}function mixkey(t,r){for(var a,n=t+"",s=0;stypeof self?self:this,[],Math)},0x1564d7324:(t,r,a)=>{"use strict";t.exports=a.p+"selfie_segmentation-8105863.binarypb"},0x1289871b1:(t,r,a)=>{"use strict";t.exports=a.p+"selfie_segmentation_landscape-42d35e9.tflite"},0x1047389d9:(t,r,a)=>{"use strict";t.exports=a.p+"selfie_segmentation_solution_simd_wasm_bin.0e7b9ccf311c3e953191.bin.js"},0x19f9de028:(t,r,a)=>{"use strict";t.exports=a.p+"selfie_segmentation_solution_simd_wasm_bin-be775ab.wasm"},0x67a17711:(t,r,a)=>{"use strict";t.exports=a.p+"selfie_segmentation_solution_wasm_bin.f87d09d7c30321df692e.bin.js"},0xc369e6e0:(t,r,a)=>{"use strict";t.exports=a.p+"selfie_segmentation_solution_wasm_bin-85be6af.wasm"}}]); //# sourceMappingURL=https://slack.com/source-maps/bv1-13/gantry-v2-async-vendors-node_modules_mediapipe_selfie_segmentation_selfie_segmentation_js-node_modules_tensor-f12dad.843def897efacb26bb27.min.js.mapØA —Eoúô