
Commit 8388f98

fix browser test?
1 parent 3e5fc9f commit 8388f98

File tree

1 file changed: +93 -91

packages/hub/src/utils/XetBlob.ts

Lines changed: 93 additions & 91 deletions
@@ -278,7 +278,7 @@ export class XetBlob extends Blob {
 
 		let leftoverBytes: Uint8Array | undefined = undefined;
 
-		while (!done && totalBytesRead < maxBytes) {
+		fetchData: while (!done && totalBytesRead < maxBytes) {
 			const result = await reader.read();
 			done = result.done;
 
@@ -293,107 +293,109 @@ export class XetBlob extends Blob {
 				leftoverBytes = undefined;
 			}
 
-			if (result.value.length < 8) {
-				// We need 8 bytes to parse the chunk header
-				leftoverBytes = result.value;
-				continue;
-			}
-
-			const header = new DataView(result.value.buffer, result.value.byteOffset, CHUNK_HEADER_BYTES);
-			const chunkHeader: ChunkHeader = {
-				version: header.getUint8(0),
-				compressed_length: header.getUint8(1) | (header.getUint8(2) << 8) | (header.getUint8(3) << 16),
-				compression_scheme: header.getUint8(4),
-				uncompressed_length: header.getUint8(5) | (header.getUint8(6) << 8) | (header.getUint8(7) << 16),
-			};
+			while (totalBytesRead < maxBytes) {
+				if (result.value.length < 8) {
+					// We need 8 bytes to parse the chunk header
+					leftoverBytes = result.value;
+					continue fetchData;
+				}
 
-			console.log("chunk header", chunkHeader);
+				const header = new DataView(result.value.buffer, result.value.byteOffset, CHUNK_HEADER_BYTES);
+				const chunkHeader: ChunkHeader = {
+					version: header.getUint8(0),
+					compressed_length: header.getUint8(1) | (header.getUint8(2) << 8) | (header.getUint8(3) << 16),
+					compression_scheme: header.getUint8(4),
+					uncompressed_length: header.getUint8(5) | (header.getUint8(6) << 8) | (header.getUint8(7) << 16),
+				};
 
-			if (chunkHeader.version !== 0) {
-				throw new Error(`Unsupported chunk version ${chunkHeader.version}`);
-			}
+				console.log("chunk header", chunkHeader);
 
-			if (
-				chunkHeader.compression_scheme !== CompressionScheme.None &&
-				chunkHeader.compression_scheme !== CompressionScheme.LZ4 &&
-				chunkHeader.compression_scheme !== CompressionScheme.ByteGroupingLZ4
-			) {
-				throw new Error(
-					`Unsupported compression scheme ${
-						compressionSchemeLabels[chunkHeader.compression_scheme] ?? chunkHeader.compression_scheme
-					}`
-				);
-			}
-
-			if (result.value.length < chunkHeader.compressed_length + CHUNK_HEADER_BYTES) {
-				// We need more data to read the full chunk
-				leftoverBytes = result.value;
-				continue;
-			}
+				if (chunkHeader.version !== 0) {
+					throw new Error(`Unsupported chunk version ${chunkHeader.version}`);
+				}
 
-			result.value = result.value.slice(CHUNK_HEADER_BYTES);
-
-			let uncompressed =
-				chunkHeader.compression_scheme === CompressionScheme.LZ4
-					? lz4_decompress(result.value.slice(0, chunkHeader.compressed_length), chunkHeader.uncompressed_length)
-					: chunkHeader.compression_scheme === CompressionScheme.ByteGroupingLZ4
-					  ? bg4_regoup_bytes(
-							lz4_decompress(
-								result.value.slice(0, chunkHeader.compressed_length),
-								chunkHeader.uncompressed_length
-							)
-					    )
-					  : result.value.slice(0, chunkHeader.compressed_length);
-
-			const range = ranges.find((range) => chunkIndex >= range.start && chunkIndex < range.end);
-			const shouldYield = chunkIndex >= term.range.start && chunkIndex < term.range.end;
-			const minRefCountToStore = shouldYield ? 2 : 1;
-			let stored = false;
-
-			// Assuming non-overlapping fetch_info ranges for the same hash
-			if (range && range.refCount >= minRefCountToStore) {
-				range.data ??= [];
-				range.data.push(uncompressed);
-				stored = true;
-			}
+				if (
+					chunkHeader.compression_scheme !== CompressionScheme.None &&
+					chunkHeader.compression_scheme !== CompressionScheme.LZ4 &&
+					chunkHeader.compression_scheme !== CompressionScheme.ByteGroupingLZ4
+				) {
+					throw new Error(
+						`Unsupported compression scheme ${
+							compressionSchemeLabels[chunkHeader.compression_scheme] ?? chunkHeader.compression_scheme
+						}`
+					);
+				}
 
-			if (shouldYield) {
-				if (readBytesToSkip) {
-					const skipped = Math.min(readBytesToSkip, uncompressed.length);
-					uncompressed = uncompressed.slice(readBytesToSkip);
-					readBytesToSkip -= skipped;
+				if (result.value.length < chunkHeader.compressed_length + CHUNK_HEADER_BYTES) {
+					// We need more data to read the full chunk
+					leftoverBytes = result.value;
+					continue fetchData;
 				}
 
-				if (uncompressed.length > maxBytes - totalBytesRead) {
-					uncompressed = uncompressed.slice(0, maxBytes - totalBytesRead);
+				result.value = result.value.slice(CHUNK_HEADER_BYTES);
+
+				let uncompressed =
+					chunkHeader.compression_scheme === CompressionScheme.LZ4
+						? lz4_decompress(result.value.slice(0, chunkHeader.compressed_length), chunkHeader.uncompressed_length)
+						: chunkHeader.compression_scheme === CompressionScheme.ByteGroupingLZ4
+						  ? bg4_regoup_bytes(
+								lz4_decompress(
+									result.value.slice(0, chunkHeader.compressed_length),
+									chunkHeader.uncompressed_length
+								)
+						    )
+						  : result.value.slice(0, chunkHeader.compressed_length);
+
+				const range = ranges.find((range) => chunkIndex >= range.start && chunkIndex < range.end);
+				const shouldYield = chunkIndex >= term.range.start && chunkIndex < term.range.end;
+				const minRefCountToStore = shouldYield ? 2 : 1;
+				let stored = false;
+
+				// Assuming non-overlapping fetch_info ranges for the same hash
+				if (range && range.refCount >= minRefCountToStore) {
+					range.data ??= [];
+					range.data.push(uncompressed);
+					stored = true;
 				}
 
-				if (uncompressed.length) {
-					console.log(
-						"yield",
-						uncompressed.length,
-						"bytes",
-						result.value.length,
-						"total read",
-						totalBytesRead,
-						stored
-					);
-					totalBytesRead += uncompressed.length;
-					yield stored ? uncompressed.slice() : uncompressed;
-					console.log(
-						"yielded",
-						uncompressed.length,
-						"bytes",
-						result.value.length,
-						"total read",
-						totalBytesRead,
-						stored
-					);
+				if (shouldYield) {
+					if (readBytesToSkip) {
+						const skipped = Math.min(readBytesToSkip, uncompressed.length);
+						uncompressed = uncompressed.slice(readBytesToSkip);
+						readBytesToSkip -= skipped;
+					}
+
+					if (uncompressed.length > maxBytes - totalBytesRead) {
+						uncompressed = uncompressed.slice(0, maxBytes - totalBytesRead);
+					}
+
+					if (uncompressed.length) {
+						console.log(
+							"yield",
+							uncompressed.length,
+							"bytes",
+							result.value.length,
+							"total read",
+							totalBytesRead,
+							stored
+						);
+						totalBytesRead += uncompressed.length;
+						yield stored ? uncompressed.slice() : uncompressed;
+						console.log(
+							"yielded",
+							uncompressed.length,
+							"bytes",
+							result.value.length,
+							"total read",
+							totalBytesRead,
+							stored
+						);
+					}
 				}
-			}
 
-			chunkIndex++;
-			leftoverBytes = result.value.slice(chunkHeader.compressed_length);
+				chunkIndex++;
+				result.value = result.value.slice(chunkHeader.compressed_length);
+			}
 		}
 
 		console.log("done", done, "total read", totalBytesRead);
