@@ -218,13 +218,13 @@ void test_hash_table_file_operations_empty_table()
     HashTable_t * ht = ht_new ();
 
     // Dumping empty hash table
-    FILE * fp = fopen ("hash.bin", "wb");
+    FILE * fp = fopen ("hash.bin", "w");
     ht_dump (ht, fp);
 
     ht_delete (ht);
 
     // Reading ht from same file
-    freopen ("hash.bin", "rb+", fp);
+    freopen ("hash.bin", "r", fp);
     ht = ht_from_file (fp);
 
     // Make sure it's an empty hash table
@@ -240,12 +240,12 @@ void test_hash_table_file_operations_resized_table()
 {// Can dump/read resized ht
     HashTable_t * ht = ht_new ();
     unsigned long resized_capacity = ht_capacity (ht) + 1;
-    FILE * fp = fopen ("hash.bin", "wb");
+    FILE * fp = fopen ("hash.bin", "w");
 
     ht_resize (ht, resized_capacity);
     ht_dump (ht, fp);
 
-    freopen ("hash.bin", "rb", fp);
+    freopen ("hash.bin", "r", fp);
     ht_delete (ht);
     ht = ht_from_file (fp);
 
@@ -261,12 +261,12 @@ void test_hash_table_file_operations_one_article()
     HashTable_t * ht = ht_new ();
     const char * const a_key = "DOI";
     Article_t * const a = make_article (a_key, "Title", "Author", 2000);
-    FILE * fp = fopen ("hash.bin", "wb");
+    FILE * fp = fopen ("hash.bin", "w");
 
     ht_insert (ht, a);
     ht_dump (ht, fp);
 
-    freopen ("hash.bin", "rb+", fp);
+    freopen ("hash.bin", "r", fp);
     ht_delete (ht);
     ht = ht_from_file (fp);
 
@@ -282,37 +282,51 @@ void test_hash_table_file_operations_one_article()
     ht_delete (ht);
 }
 
-void test_hash_table_file_operations ()
+void test_hash_table_file_operations_two_articles ()
 {
-    test_hash_table_file_operations_empty_table ();
-    test_hash_table_file_operations_resized_table ();
-    test_hash_table_file_operations_one_article ();
-
     HashTable_t * ht = ht_new ();
-    const char * const a_key = "DOI";
-    const char * const b_key = "Other_DOI";
-    Article_t * const a = make_article (a_key, "Title", "Author", 2000);
-    Article_t * const b = make_article (b_key, "Title", "Author", 2000);
-    FILE * fp = fopen ("hash.bin", "wb");
+    const char * const a_key = "DOI_without_spaces";
+    const char * const b_key = "DOI with spaces";
+    Article_t * const a = make_article (a_key, "Title with spaces", "Author", 2000);
+    Article_t * const b = make_article (b_key, "Title_without_spaces", "Author with spaces", 2000);
+    FILE * fp = fopen ("hash.bin", "w");
 
     ht_insert (ht, a);
     ht_insert (ht, b);
     ht_dump (ht, fp);
     ht_delete (ht);
 
-    freopen ("hash.bin", "rb", fp);
+    freopen ("hash.bin", "r", fp);
     ht = ht_from_file (fp);
 
     assert (ht_is_empty (ht) == false);
     assert (ht_count (ht) == 2);
 
+    assert (ht_contains (ht, a_key) == true);
+    const Article_t * fetched = ht_fetch (ht, a_key);
+    assert (fetched != NULL);
+    assert (articles_are_equal (a, fetched));
+
+    assert (ht_contains (ht, b_key) == true);
+    fetched = ht_fetch (ht, b_key);
+    assert (fetched != NULL);
+    assert (articles_are_equal (b, fetched));
+
     debug ("Can dump ht with two articles");
 
     delete_article (a);
     delete_article (b);
     ht_delete (ht);
 }
 
+void test_hash_table_file_operations ()
+{
+    test_hash_table_file_operations_empty_table ();
+    test_hash_table_file_operations_resized_table ();
+    test_hash_table_file_operations_one_article ();
+    test_hash_table_file_operations_two_articles ();
+}
+
 void print_test_status ()
 {
     if (global_failure)