-import * as duckdb from '../src/';
-import { LogLevel } from '../src/';
+import {
+    AsyncDuckDB,
+    AsyncDuckDBConnection,
+    ConsoleLogger,
+    DuckDBAccessMode,
+    DuckDBBundle,
+    DuckDBDataProtocol,
+    LogLevel
+} from '../src/';
 import * as arrow from 'apache-arrow';
 
-export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
-    let db: duckdb.AsyncDuckDB;
-    let conn: duckdb.AsyncDuckDBConnection;
+export function testOPFS(baseDir: string, bundle: () => DuckDBBundle): void {
+    const logger = new ConsoleLogger(LogLevel.ERROR);
+
+    let db: AsyncDuckDB;
+    let conn: AsyncDuckDBConnection;
 
     beforeAll(async () => {
-        removeFiles();
+        await removeFiles();
     });
 
     afterAll(async () => {
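Two changes run through the hooks above: the `duckdb.*` namespace import becomes named imports, and cleanup is now awaited. The `await` matters because Jasmine only waits for work that is part of the hook's own promise chain; a fire-and-forget call can still be deleting OPFS entries while the first spec runs. A minimal sketch of the difference, assuming the suite's `removeFiles` helper:

```ts
// Floating promise: the hook resolves immediately, so a spec may start
// while OPFS entries are still being deleted.
beforeAll(async () => {
    removeFiles();
});

// Awaited: the hook resolves only after cleanup has actually finished.
beforeAll(async () => {
    await removeFiles();
});
```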
@@ -17,19 +26,18 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
         if (db) {
             await db.terminate();
         }
-        removeFiles();
+        await removeFiles();
     });
 
     beforeEach(async () => {
-        removeFiles();
+        await removeFiles();
         //
-        const logger = new duckdb.ConsoleLogger(LogLevel.ERROR);
         const worker = new Worker(bundle().mainWorker!);
-        db = new duckdb.AsyncDuckDB(logger, worker);
+        db = new AsyncDuckDB(logger, worker);
         await db.instantiate(bundle().mainModule, bundle().pthreadWorker);
         await db.open({
             path: 'opfs://test.db',
-            accessMode: duckdb.DuckDBAccessMode.READ_WRITE
+            accessMode: DuckDBAccessMode.READ_WRITE
         });
         conn = await db.connect();
     });
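With the logger hoisted to the top of `testOPFS`, every hook and spec now shares one `ConsoleLogger` instead of constructing its own. For reference, the setup sequence the suite performs, collapsed into a minimal sketch (to be read inside an async context; `bundle()` is the suite's already-selected `DuckDBBundle`):

```ts
const logger = new ConsoleLogger(LogLevel.ERROR);   // shared across specs
const worker = new Worker(bundle().mainWorker!);    // DuckDB runs in a Web Worker
const db = new AsyncDuckDB(logger, worker);
await db.instantiate(bundle().mainModule, bundle().pthreadWorker);
await db.open({
    path: 'opfs://test.db',                         // OPFS-backed database file
    accessMode: DuckDBAccessMode.READ_WRITE,
});
const conn = await db.connect();
```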
@@ -41,12 +49,12 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
         if (db) {
             await db.terminate();
         }
-        removeFiles();
+        await removeFiles();
     });
 
     describe('Load Data in OPFS', () => {
         it('Import Small Parquet file', async () => {
-            await conn.send(`CREATE TABLE stu AS SELECT * FROM "${baseDir}/uni/studenten.parquet"`);
+            await conn.send(`CREATE TABLE stu AS SELECT * FROM "${baseDir}/uni/studenten.parquet"`);
             await conn.send(`CHECKPOINT;`);
             const result = await conn.send(`SELECT matrnr FROM stu;`);
             const batches = [];
@@ -60,7 +68,7 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
         });
 
         it('Import Large Parquet file', async () => {
-            await conn.send(`CREATE TABLE lineitem AS SELECT * FROM "${baseDir}/tpch/0_01/parquet/lineitem.parquet"`);
+            await conn.send(`CREATE TABLE lineitem AS SELECT * FROM "${baseDir}/tpch/0_01/parquet/lineitem.parquet"`);
             await conn.send(`CHECKPOINT;`);
             const result = await conn.send(`SELECT count(*)::INTEGER as cnt FROM lineitem;`);
             const batches = [];
@@ -72,18 +80,17 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
         });
 
         it('Load Existing DB File', async () => {
-            await conn.send(`CREATE TABLE tmp AS SELECT * FROM "${baseDir}/tpch/0_01/parquet/lineitem.parquet"`);
+            await conn.send(`CREATE TABLE tmp AS SELECT * FROM "${baseDir}/tpch/0_01/parquet/lineitem.parquet"`);
             await conn.send(`CHECKPOINT;`);
             await conn.close();
             await db.terminate();
 
-            const logger = new duckdb.ConsoleLogger(LogLevel.ERROR);
             const worker = new Worker(bundle().mainWorker!);
-            db = new duckdb.AsyncDuckDB(logger, worker);
+            db = new AsyncDuckDB(logger, worker);
             await db.instantiate(bundle().mainModule, bundle().pthreadWorker);
             await db.open({
                 path: 'opfs://test.db',
-                accessMode: duckdb.DuckDBAccessMode.READ_WRITE
+                accessMode: DuckDBAccessMode.READ_WRITE
             });
             conn = await db.connect();
 
@@ -98,16 +105,16 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
         it('Load Parquet file that already exists with an empty handle', async () => {
             // 1. Write to OPFS.
-            const parquetBuffer = await fetch(`${baseDir}/tpch/0_01/parquet/lineitem.parquet`).then(res =>
+            const parquetBuffer = await fetch(`${baseDir}/tpch/0_01/parquet/lineitem.parquet`).then(res =>
                 res.arrayBuffer(),
             );
             const opfsRoot = await navigator.storage.getDirectory();
-            const fileHandle = await opfsRoot.getFileHandle('test.parquet', { create: true });
+            const fileHandle = await opfsRoot.getFileHandle('test.parquet', { create: true });
             const writable = await fileHandle.createWritable();
             await writable.write(parquetBuffer);
             await writable.close();
             // 2. The handle is empty (null): the worker obtains a file handle itself, using the file name.
-            await db.registerFileHandle('test.parquet', null, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test.parquet', null, DuckDBDataProtocol.BROWSER_FSACCESS, true);
             await conn.send(`CREATE TABLE lineitem1 AS SELECT * FROM read_parquet('test.parquet')`);
             await conn.send(`CHECKPOINT;`);
 
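The `null` handle is the point of this spec: with `DuckDBDataProtocol.BROWSER_FSACCESS`, the worker can look the file up in OPFS by name on its own. Side by side with the explicit-handle variant used by the next spec (a sketch, not part of the patch):

```ts
// Variant 1: register by name only; the worker resolves the OPFS handle itself.
await db.registerFileHandle('test.parquet', null, DuckDBDataProtocol.BROWSER_FSACCESS, true);

// Variant 2: pass the FileSystemFileHandle obtained from OPFS directly.
const opfsRoot = await navigator.storage.getDirectory();
const fileHandle = await opfsRoot.getFileHandle('test.parquet');
await db.registerFileHandle('test.parquet', fileHandle, DuckDBDataProtocol.BROWSER_FSACCESS, true);
```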
@@ -122,17 +129,17 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
         it('Load Parquet file that already exists with an OPFS file handle in datadir', async () => {
             // 1. Write to OPFS.
-            const parquetBuffer = await fetch(`${baseDir}/tpch/0_01/parquet/lineitem.parquet`).then(res =>
+            const parquetBuffer = await fetch(`${baseDir}/tpch/0_01/parquet/lineitem.parquet`).then(res =>
                 res.arrayBuffer(),
             );
             const opfsRoot = await navigator.storage.getDirectory();
-            const datadir = await opfsRoot.getDirectoryHandle("datadir", { create: true });
-            const fileHandle = await datadir.getFileHandle('test.parquet', { create: true });
+            const datadir = await opfsRoot.getDirectoryHandle("datadir", { create: true });
+            const fileHandle = await datadir.getFileHandle('test.parquet', { create: true });
             const writable = await fileHandle.createWritable();
             await writable.write(parquetBuffer);
             await writable.close();
             // 2. The handle is an OPFS file handle.
-            await db.registerFileHandle('test.parquet', fileHandle, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test.parquet', fileHandle, DuckDBDataProtocol.BROWSER_FSACCESS, true);
             await conn.send(`CREATE TABLE lineitem1 AS SELECT * FROM read_parquet('test.parquet')`);
             await conn.send(`CHECKPOINT;`);
 
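The write-to-OPFS preamble recurs in several specs; isolated, it is plain File System Access API usage (a sketch; `data` stands for any `ArrayBuffer`, such as the fetched Parquet bytes):

```ts
const opfsRoot = await navigator.storage.getDirectory();
const datadir = await opfsRoot.getDirectoryHandle('datadir', { create: true });
const file = await datadir.getFileHandle('test.parquet', { create: true });
const writable = await file.createWritable();
await writable.write(data);   // accepts ArrayBuffer, Blob, or string
await writable.close();       // contents are committed on close
```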
@@ -146,16 +153,16 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
         });
 
         it('Load Parquet file that already exists', async () => {
-            const parquetBuffer = await fetch(`${baseDir}/tpch/0_01/parquet/lineitem.parquet`).then(res =>
+            const parquetBuffer = await fetch(`${baseDir}/tpch/0_01/parquet/lineitem.parquet`).then(res =>
                 res.arrayBuffer(),
             );
             const opfsRoot = await navigator.storage.getDirectory();
-            const fileHandle = await opfsRoot.getFileHandle('test.parquet', { create: true });
+            const fileHandle = await opfsRoot.getFileHandle('test.parquet', { create: true });
             const writable = await fileHandle.createWritable();
             await writable.write(parquetBuffer);
             await writable.close();
 
-            await db.registerFileHandle('test.parquet', fileHandle, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test.parquet', fileHandle, DuckDBDataProtocol.BROWSER_FSACCESS, true);
             await conn.send(`CREATE TABLE lineitem1 AS SELECT * FROM read_parquet('test.parquet')`);
             await conn.send(`CHECKPOINT;`);
             await conn.send(`CREATE TABLE lineitem2 AS SELECT * FROM read_parquet('test.parquet')`);
@@ -197,9 +204,9 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
         it('Drop File + Export as CSV to OPFS + Load CSV', async () => {
             const opfsRoot = await navigator.storage.getDirectory();
-            const testHandle = await opfsRoot.getFileHandle('test.csv', { create: true });
-            await db.registerFileHandle('test.csv', testHandle, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
-            await conn.send(`CREATE TABLE zzz AS SELECT * FROM "${baseDir}/tpch/0_01/parquet/lineitem.parquet"`);
+            const testHandle = await opfsRoot.getFileHandle('test.csv', { create: true });
+            await db.registerFileHandle('test.csv', testHandle, DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await conn.send(`CREATE TABLE zzz AS SELECT * FROM "${baseDir}/tpch/0_01/parquet/lineitem.parquet"`);
             await conn.send(`COPY (SELECT * FROM zzz) TO 'test.csv'`);
             await conn.send(`COPY (SELECT * FROM zzz) TO 'non_existing.csv'`);
             await conn.close();
@@ -208,7 +215,7 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
             await db.open({});
             conn = await db.connect();
-            await db.registerFileHandle('test.csv', testHandle, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test.csv', testHandle, DuckDBDataProtocol.BROWSER_FSACCESS, true);
 
             const result = await conn.send(`SELECT count(*)::INTEGER as cnt FROM 'test.csv';`);
             const batches = [];
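Note the re-registration after `db.open({})`: the spec drops files, reopens the database, and then registers the same `testHandle` again before querying it, so registrations evidently do not carry across a reopen. In sketch form:

```ts
await db.open({});   // reopen with default settings
conn = await db.connect();
// The earlier registration is gone; register the same handle again.
await db.registerFileHandle('test.csv', testHandle, DuckDBDataProtocol.BROWSER_FSACCESS, true);
```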
@@ -224,14 +231,14 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
         it('Drop Files + Export as CSV to OPFS + Load CSV', async () => {
             const opfsRoot = await navigator.storage.getDirectory();
-            const testHandle1 = await opfsRoot.getFileHandle('test1.csv', { create: true });
-            const testHandle2 = await opfsRoot.getFileHandle('test2.csv', { create: true });
-            const testHandle3 = await opfsRoot.getFileHandle('test3.csv', { create: true });
-            await db.registerFileHandle('test1.csv', testHandle1, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
-            await db.registerFileHandle('test2.csv', testHandle2, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
-            await db.registerFileHandle('test3.csv', testHandle3, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
-
-            await conn.send(`CREATE TABLE zzz AS SELECT * FROM "${baseDir}/tpch/0_01/parquet/lineitem.parquet"`);
+            const testHandle1 = await opfsRoot.getFileHandle('test1.csv', { create: true });
+            const testHandle2 = await opfsRoot.getFileHandle('test2.csv', { create: true });
+            const testHandle3 = await opfsRoot.getFileHandle('test3.csv', { create: true });
+            await db.registerFileHandle('test1.csv', testHandle1, DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test2.csv', testHandle2, DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test3.csv', testHandle3, DuckDBDataProtocol.BROWSER_FSACCESS, true);
+
+            await conn.send(`CREATE TABLE zzz AS SELECT * FROM "${baseDir}/tpch/0_01/parquet/lineitem.parquet"`);
             await conn.send(`COPY (SELECT * FROM zzz) TO 'test1.csv'`);
             await conn.send(`COPY (SELECT * FROM zzz) TO 'test2.csv'`);
             await conn.send(`COPY (SELECT * FROM zzz) TO 'test3.csv'`);
@@ -242,9 +249,9 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
             await db.open({});
             conn = await db.connect();
-            await db.registerFileHandle('test1.csv', testHandle1, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
-            await db.registerFileHandle('test2.csv', testHandle2, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
-            await db.registerFileHandle('test3.csv', testHandle3, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test1.csv', testHandle1, DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test2.csv', testHandle2, DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test3.csv', testHandle3, DuckDBDataProtocol.BROWSER_FSACCESS, true);
 
             {
                 const result1 = await conn.send(`SELECT count(*)::INTEGER as cnt FROM 'test1.csv';`);
@@ -280,14 +287,14 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
     describe('Open database in OPFS', () => {
        it('should not open a non-existent DB file in read-only', async () => {
-            const logger = new duckdb.ConsoleLogger(LogLevel.ERROR);
+            const logger = new ConsoleLogger(LogLevel.ERROR);
             const worker = new Worker(bundle().mainWorker!);
-            const db_ = new duckdb.AsyncDuckDB(logger, worker);
+            const db_ = new AsyncDuckDB(logger, worker);
             await db_.instantiate(bundle().mainModule, bundle().pthreadWorker);
 
             await expectAsync(db_.open({
                 path: 'opfs://non_existent.db',
-                accessMode: duckdb.DuckDBAccessMode.READ_ONLY,
+                accessMode: DuckDBAccessMode.READ_ONLY,
             })).toBeRejectedWithError(Error, /file or directory could not be found/);
 
             await db_.terminate();
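Taken together, the specs in this describe block pin down the access-mode contract for `opfs://` paths. Summarized as a sketch using the same paths the tests use:

```ts
// READ_ONLY on a missing file rejects:
await db.open({
    path: 'opfs://non_existent.db',
    accessMode: DuckDBAccessMode.READ_ONLY,
}); // -> Error: file or directory could not be found

// READ_WRITE creates missing intermediate directories and the file:
await db.open({
    path: 'opfs://duckdb_test/path/to/duck.db',
    accessMode: DuckDBAccessMode.READ_WRITE,
}); // -> resolves
```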
@@ -300,39 +307,39 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
         });
 
         it('should not open a non-existent DB file and mkdir in read-only', async () => {
-            const logger = new duckdb.ConsoleLogger(LogLevel.ERROR);
+            const logger = new ConsoleLogger(LogLevel.ERROR);
             const worker = new Worker(bundle().mainWorker!);
-            const db_ = new duckdb.AsyncDuckDB(logger, worker);
+            const db_ = new AsyncDuckDB(logger, worker);
             await db_.instantiate(bundle().mainModule, bundle().pthreadWorker);
 
             await expectAsync(db_.open({
                 path: 'opfs://duckdb_test/path/to/non_existent.db',
-                accessMode: duckdb.DuckDBAccessMode.READ_ONLY,
+                accessMode: DuckDBAccessMode.READ_ONLY,
             })).toBeRejectedWithError(Error, /file or directory could not be found/);
 
             await db_.terminate();
             await worker.terminate();
         });
 
         it('should open a non-existent DB file and mkdir in read-write', async () => {
-            const logger = new duckdb.ConsoleLogger(LogLevel.ERROR);
+            const logger = new ConsoleLogger(LogLevel.ERROR);
             const worker = new Worker(bundle().mainWorker!);
-            const db_ = new duckdb.AsyncDuckDB(logger, worker);
+            const db_ = new AsyncDuckDB(logger, worker);
             await db_.instantiate(bundle().mainModule, bundle().pthreadWorker);
 
             await expectAsync(db_.open({
                 path: 'opfs://duckdb_test/path/to/duck.db',
-                accessMode: duckdb.DuckDBAccessMode.READ_WRITE,
+                accessMode: DuckDBAccessMode.READ_WRITE,
             })).toBeResolved();
 
             await db_.terminate();
             await worker.terminate();
         });
 
         it('should open a non-existent DB file in read-write and create files', async () => {
-            const logger = new duckdb.ConsoleLogger(LogLevel.ERROR);
+            const logger = new ConsoleLogger(LogLevel.ERROR);
             const worker = new Worker(bundle().mainWorker!);
-            const db_ = new duckdb.AsyncDuckDB(logger, worker);
+            const db_ = new AsyncDuckDB(logger, worker);
             await db_.instantiate(bundle().mainModule, bundle().pthreadWorker);
 
             const opfsRoot = await navigator.storage.getDirectory();
@@ -345,7 +352,7 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
             await expectAsync(db_.open({
                 path: 'opfs://non_existent_2.db',
-                accessMode: duckdb.DuckDBAccessMode.READ_WRITE,
+                accessMode: DuckDBAccessMode.READ_WRITE,
             })).toBeResolved();
 
             await db_.terminate();
@@ -359,25 +366,29 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
     async function removeFiles() {
         const opfsRoot = await navigator.storage.getDirectory();
-        await opfsRoot.removeEntry('test.db').catch(() => {});
-        await opfsRoot.removeEntry('test.db.wal').catch(() => {});
-        await opfsRoot.removeEntry('test.csv').catch(() => {});
-        await opfsRoot.removeEntry('test1.csv').catch(() => {});
-        await opfsRoot.removeEntry('test2.csv').catch(() => {});
-        await opfsRoot.removeEntry('test3.csv').catch(() => {});
-        await opfsRoot.removeEntry('test.parquet').catch(() => {});
+
+        await opfsRoot.removeEntry('test.db').catch(_ignore);
+        await opfsRoot.removeEntry('test.db.wal').catch(_ignore);
+        await opfsRoot.removeEntry('test.csv').catch(_ignore);
+        await opfsRoot.removeEntry('test1.csv').catch(_ignore);
+        await opfsRoot.removeEntry('test2.csv').catch(_ignore);
+        await opfsRoot.removeEntry('test3.csv').catch(_ignore);
+        await opfsRoot.removeEntry('test.parquet').catch(_ignore);
         try {
             const datadir = await opfsRoot.getDirectoryHandle('datadir');
-            datadir.removeEntry('test.parquet').catch(() => {});
+            datadir.removeEntry('test.parquet').catch(_ignore);
         } catch (e) {
             //
         }
-        await opfsRoot.removeEntry('datadir').catch(() => {});
+        await opfsRoot.removeEntry('datadir').catch(_ignore);
         // In case a failed run left files behind.
-        await opfsRoot.removeEntry('non_existent.db').catch(() => {});
-        await opfsRoot.removeEntry('non_existent.db.wal').catch(() => {});
-        await opfsRoot.removeEntry('non_existent_2.db').catch(() => {});
-        await opfsRoot.removeEntry('non_existent_2.db.wal').catch(() => {});
-        await opfsRoot.removeEntry('duckdb_test', { recursive: true }).catch(() => {});
+        await opfsRoot.removeEntry('non_existent.db').catch(_ignore);
+        await opfsRoot.removeEntry('non_existent.db.wal').catch(_ignore);
+        await opfsRoot.removeEntry('non_existent_2.db').catch(_ignore);
+        await opfsRoot.removeEntry('non_existent_2.db.wal').catch(_ignore);
+        await opfsRoot.removeEntry('duckdb_test', { recursive: true }).catch(_ignore);
     }
 }
+
+// Shared no-op used to swallow expected cleanup errors.
+const _ignore: () => void = () => {};
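`removeEntry()` rejects with a `NotFoundError` when the entry does not exist, which is precisely the case cleanup wants to ignore; the patch merely replaces each inline `() => {}` with one shared, named no-op. Equivalent before/after (sketch):

```ts
// Before: a fresh anonymous no-op at every call site.
await opfsRoot.removeEntry('test.db').catch(() => {});

// After: one shared, intention-revealing callback.
const _ignore: () => void = () => {};
await opfsRoot.removeEntry('test.db').catch(_ignore);
```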