path: root/test/parallel/test-blob-file-backed.js
'use strict';

const common = require('../common');
const {
  strictEqual,
  rejects,
  throws,
} = require('assert');
const { TextDecoder } = require('util');
const {
  writeFileSync,
  openAsBlob,
} = require('fs');

const {
  unlink
} = require('fs/promises');
const path = require('path');
const { Blob } = require('buffer');

const tmpdir = require('../common/tmpdir');
const testfile = path.join(tmpdir.path, 'test-file-backed-blob.txt');
const testfile2 = path.join(tmpdir.path, 'test-file-backed-blob2.txt');
const testfile3 = path.join(tmpdir.path, 'test-file-backed-blob3.txt');
tmpdir.refresh();

const data = `${'a'.repeat(1000)}${'b'.repeat(2000)}`;

writeFileSync(testfile, data);
writeFileSync(testfile2, data.repeat(100));
writeFileSync(testfile3, '');

(async () => {
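  // Basic consumption of a file-backed Blob: arrayBuffer(), text(), stream(),
  // and composition into a new Blob with other parts.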
  const blob = await openAsBlob(testfile);

  const ab = await blob.arrayBuffer();
  const dec = new TextDecoder();

  strictEqual(dec.decode(new Uint8Array(ab)), data);
  strictEqual(await blob.text(), data);

  // Can be read multiple times
  let stream = blob.stream();
  let check = '';
  for await (const chunk of stream)
    check += dec.decode(chunk);
  strictEqual(check, data);

  // Can be combined with other Blobs and read
  const combined = new Blob(['hello', blob, 'world']);
  const ab2 = await combined.arrayBuffer();
  strictEqual(dec.decode(ab2.slice(0, 5)), 'hello');
  strictEqual(dec.decode(ab2.slice(5, -5)), data);
  strictEqual(dec.decode(ab2.slice(-5)), 'world');

  // If the file is modified, though, the stream errors.
  writeFileSync(testfile, data + 'abc');

  stream = blob.stream();

  const read = async () => {
    // eslint-disable-next-line no-unused-vars, no-empty
    for await (const _ of stream) {}
  };

  await rejects(read(), { name: 'NotReadableError' });

  await unlink(testfile);
})().then(common.mustCall());

(async () => {
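  // Modifying the underlying file while the stream is being consumed should
  // also cause the read to reject.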
  const blob = await openAsBlob(testfile2);
  const stream = blob.stream();
  const read = async () => {
    // eslint-disable-next-line no-unused-vars
    for await (const _ of stream) {
      writeFileSync(testfile2, data + 'abc');
    }
  };

  await rejects(read(), { name: 'NotReadableError' });

  await unlink(testfile2);
})().then(common.mustCall());

(async () => {
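  // A Blob opened from an empty file has size 0 and empty text; growing the
  // file afterwards invalidates further reads.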
  const blob = await openAsBlob(testfile3);
  strictEqual(blob.size, 0);
  strictEqual(await blob.text(), '');
  writeFileSync(testfile3, 'abc');
  await rejects(blob.text(), { name: 'NotReadableError' });
  await unlink(testfile3);
})().then(common.mustCall());

(async () => {
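  // Same empty-file case, but read through a ReadableStream reader rather
  // than text().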
  const blob = await openAsBlob(testfile3);
  strictEqual(blob.size, 0);
  writeFileSync(testfile3, 'abc');
  const stream = blob.stream();
  const reader = stream.getReader();
  await rejects(() => reader.read(), { name: 'NotReadableError' });
})().then(common.mustCall());

(async () => {
  // We currently do not allow File-backed blobs to be cloned or transferred
  // across worker threads. This is largely because the underlying FdEntry
  // is bound to the Environment/Realm under which it was created.
  const blob = await openAsBlob(__filename);
  throws(() => structuredClone(blob), {
    code: 'ERR_INVALID_STATE',
    message: 'Invalid state: File-backed Blobs are not cloneable'
  });
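
  // For contrast (illustrative addition, not part of the original test): an
  // ordinary in-memory Blob is serializable, so structuredClone is expected
  // to succeed and the clone should read back the same contents.
  const plain = new Blob(['hello']);
  const cloned = structuredClone(plain);
  strictEqual(await cloned.text(), 'hello');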
})().then(common.mustCall());