Skip to content

Commit 9f25f35

Browse files
xp4ns3akpm00
authored and committed
hfsplus: convert kmap() to kmap_local_page() in btree.c
kmap() is being deprecated in favor of kmap_local_page(). There are two main problems with kmap(): (1) It comes with an overhead as mapping space is restricted and protected by a global lock for synchronization and (2) it also requires global TLB invalidation when the kmap's pool wraps and it might block when the mapping space is fully utilized until a slot becomes available. With kmap_local_page() the mappings are per thread, CPU local, can take page faults, and can be called from any context (including interrupts). It is faster than kmap() in kernels with HIGHMEM enabled. Furthermore, the tasks can be preempted and, when they are scheduled to run again, the kernel virtual addresses are restored and are still valid. Since its use in btree.c is safe everywhere, it should be preferred. Therefore, replace kmap() with kmap_local_page() in btree.c. Tested in a QEMU/KVM x86_32 VM, 6GB RAM, booting a kernel with HIGHMEM64GB enabled. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Fabio M. De Francesco <[email protected]> Suggested-by: Ira Weiny <[email protected]> Reviewed-by: Ira Weiny <[email protected]> Reviewed-by: Viacheslav Dubeyko <[email protected]> Cc: Bart Van Assche <[email protected]> Cc: Jens Axboe <[email protected]> Cc: Kees Cook <[email protected]> Cc: Matthew Wilcox <[email protected]> Cc: Muchun Song <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent f9ef3b9 commit 9f25f35

File tree

1 file changed

+14
-14
lines changed

1 file changed

+14
-14
lines changed

fs/hfsplus/btree.c

Lines changed: 14 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -163,7 +163,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
163163
goto free_inode;
164164

165165
/* Load the header */
166-
head = (struct hfs_btree_header_rec *)(kmap(page) +
166+
head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
167167
sizeof(struct hfs_bnode_desc));
168168
tree->root = be32_to_cpu(head->root);
169169
tree->leaf_count = be32_to_cpu(head->leaf_count);
@@ -240,12 +240,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
240240
(tree->node_size + PAGE_SIZE - 1) >>
241241
PAGE_SHIFT;
242242

243-
kunmap(page);
243+
kunmap_local(head);
244244
put_page(page);
245245
return tree;
246246

247247
fail_page:
248-
kunmap(page);
248+
kunmap_local(head);
249249
put_page(page);
250250
free_inode:
251251
tree->inode->i_mapping->a_ops = &hfsplus_aops;
@@ -292,7 +292,7 @@ int hfs_btree_write(struct hfs_btree *tree)
292292
return -EIO;
293293
/* Load the header */
294294
page = node->page[0];
295-
head = (struct hfs_btree_header_rec *)(kmap(page) +
295+
head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
296296
sizeof(struct hfs_bnode_desc));
297297

298298
head->root = cpu_to_be32(tree->root);
@@ -304,7 +304,7 @@ int hfs_btree_write(struct hfs_btree *tree)
304304
head->attributes = cpu_to_be32(tree->attributes);
305305
head->depth = cpu_to_be16(tree->depth);
306306

307-
kunmap(page);
307+
kunmap_local(head);
308308
set_page_dirty(page);
309309
hfs_bnode_put(node);
310310
return 0;
@@ -395,7 +395,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
395395

396396
off += node->page_offset;
397397
pagep = node->page + (off >> PAGE_SHIFT);
398-
data = kmap(*pagep);
398+
data = kmap_local_page(*pagep);
399399
off &= ~PAGE_MASK;
400400
idx = 0;
401401

@@ -408,7 +408,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
408408
idx += i;
409409
data[off] |= m;
410410
set_page_dirty(*pagep);
411-
kunmap(*pagep);
411+
kunmap_local(data);
412412
tree->free_nodes--;
413413
mark_inode_dirty(tree->inode);
414414
hfs_bnode_put(node);
@@ -418,14 +418,14 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
418418
}
419419
}
420420
if (++off >= PAGE_SIZE) {
421-
kunmap(*pagep);
422-
data = kmap(*++pagep);
421+
kunmap_local(data);
422+
data = kmap_local_page(*++pagep);
423423
off = 0;
424424
}
425425
idx += 8;
426426
len--;
427427
}
428-
kunmap(*pagep);
428+
kunmap_local(data);
429429
nidx = node->next;
430430
if (!nidx) {
431431
hfs_dbg(BNODE_MOD, "create new bmap node\n");
@@ -441,7 +441,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
441441
off = off16;
442442
off += node->page_offset;
443443
pagep = node->page + (off >> PAGE_SHIFT);
444-
data = kmap(*pagep);
444+
data = kmap_local_page(*pagep);
445445
off &= ~PAGE_MASK;
446446
}
447447
}
@@ -491,21 +491,21 @@ void hfs_bmap_free(struct hfs_bnode *node)
491491
}
492492
off += node->page_offset + nidx / 8;
493493
page = node->page[off >> PAGE_SHIFT];
494-
data = kmap(page);
494+
data = kmap_local_page(page);
495495
off &= ~PAGE_MASK;
496496
m = 1 << (~nidx & 7);
497497
byte = data[off];
498498
if (!(byte & m)) {
499499
pr_crit("trying to free free bnode "
500500
"%u(%d)\n",
501501
node->this, node->type);
502-
kunmap(page);
502+
kunmap_local(data);
503503
hfs_bnode_put(node);
504504
return;
505505
}
506506
data[off] = byte & ~m;
507507
set_page_dirty(page);
508-
kunmap(page);
508+
kunmap_local(data);
509509
hfs_bnode_put(node);
510510
tree->free_nodes++;
511511
mark_inode_dirty(tree->inode);

0 commit comments

Comments
 (0)