Commit 4bffd05

Author: Charles Mirabile (committed)
Fix mmu notifiers for range-based invalidates
JIRA: https://issues.redhat.com/browse/RHEL-83459

commit f7edb07
Author: Piotr Jaroszynski <[email protected]>
Date: Tue, 4 Mar 2025 00:51:27 -0800

Update the __flush_tlb_range_op macro not to modify its parameters, as
these are unexpected semantics. In practice, this fixes the call to
mmu_notifier_arch_invalidate_secondary_tlbs() in __flush_tlb_range_nosync()
to use the correct range instead of an empty range with start=end. The
empty range was (un)lucky, as it results in taking the invalidate-all
path that doesn't cause correctness issues, but can certainly result in
suboptimal perf.

This has been broken since commit 6bbd42e ("mmu_notifiers: call
invalidate_range() when invalidating TLBs"), when the call to the
notifiers was added to __flush_tlb_range(). It predates the addition of
the __flush_tlb_range_op() macro from commit 3608390 ("arm64: tlb:
Refactor the core flush algorithm of __flush_tlb_range") that made the
bug hard to spot.

Fixes: 6bbd42e ("mmu_notifiers: call invalidate_range() when invalidating TLBs")
Signed-off-by: Piotr Jaroszynski <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Robin Murphy <[email protected]>
Cc: Alistair Popple <[email protected]>
Cc: Raghavendra Rao Ananta <[email protected]>
Cc: SeongJae Park <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: Nicolin Chen <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Reviewed-by: Catalin Marinas <[email protected]>
Reviewed-by: Alistair Popple <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Charles Mirabile <[email protected]>
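
For illustration only, a minimal standalone C sketch (not kernel code) of the pitfall the commit describes, using the hypothetical names flush_range_buggy() and notify_range(): a do { } while (0) macro that advances its own arguments leaves the caller's start/pages at the end of the range, so a follow-up notification over [start, end) collapses to an empty range, much like the call to mmu_notifier_arch_invalidate_secondary_tlbs() did here.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Buggy pattern: the macro decrements the caller's own variables. */
#define flush_range_buggy(start, pages)                         \
do {                                                            \
        while ((pages) > 0) {                                   \
                /* pretend to invalidate one page at 'start' */ \
                (start) += PAGE_SIZE;                           \
                (pages) -= 1;                                   \
        }                                                       \
} while (0)

/* Stand-in for the secondary-TLB notifier called after the flush. */
static void notify_range(unsigned long start, unsigned long end)
{
        printf("notify [%#lx, %#lx): %s\n", start, end,
               start == end ? "empty -> invalidate-all fallback" : "exact range");
}

int main(void)
{
        unsigned long start = 0x100000, pages = 4;
        unsigned long end = start + pages * PAGE_SIZE;

        flush_range_buggy(start, pages);
        /* 'start' now equals 'end' and 'pages' is 0, so the notifier
         * sees an empty range instead of the four pages just flushed. */
        notify_range(start, end);
        return 0;
}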
Parent: ddd3653 · Commit: 4bffd05

File tree: 1 file changed (+12, -10 lines)

arch/arm64/include/asm/tlbflush.h

Lines changed: 12 additions & 10 deletions
@@ -396,33 +396,35 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 #define __flush_tlb_range_op(op, start, pages, stride, \
                                 asid, tlb_level, tlbi_user, lpa2) \
 do { \
+        typeof(start) __flush_start = start; \
+        typeof(pages) __flush_pages = pages; \
         int num = 0; \
         int scale = 3; \
         int shift = lpa2 ? 16 : PAGE_SHIFT; \
         unsigned long addr; \
         \
-        while (pages > 0) { \
+        while (__flush_pages > 0) { \
                 if (!system_supports_tlb_range() || \
-                    pages == 1 || \
-                    (lpa2 && start != ALIGN(start, SZ_64K))) { \
-                        addr = __TLBI_VADDR(start, asid); \
+                    __flush_pages == 1 || \
+                    (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) { \
+                        addr = __TLBI_VADDR(__flush_start, asid); \
                         __tlbi_level(op, addr, tlb_level); \
                         if (tlbi_user) \
                                 __tlbi_user_level(op, addr, tlb_level); \
-                        start += stride; \
-                        pages -= stride >> PAGE_SHIFT; \
+                        __flush_start += stride; \
+                        __flush_pages -= stride >> PAGE_SHIFT; \
                         continue; \
                 } \
                 \
-                num = __TLBI_RANGE_NUM(pages, scale); \
+                num = __TLBI_RANGE_NUM(__flush_pages, scale); \
                 if (num >= 0) { \
-                        addr = __TLBI_VADDR_RANGE(start >> shift, asid, \
+                        addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
                                         scale, num, tlb_level); \
                         __tlbi(r##op, addr); \
                         if (tlbi_user) \
                                 __tlbi_user(r##op, addr); \
-                        start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
-                        pages -= __TLBI_RANGE_PAGES(num, scale); \
+                        __flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
+                        __flush_pages -= __TLBI_RANGE_PAGES(num, scale); \
                 } \
                 scale--; \
         } \
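
A minimal sketch of the fix pattern applied above, using a hypothetical flush_range_fixed() rather than the kernel macro itself: copy the arguments into typeof() locals inside the do { } while (0) block and mutate only the copies, so the caller's start/pages still describe the original range when the notifier runs. typeof is a GNU C extension that the kernel already relies on.

/* Fixed pattern: work on typeof() copies; the caller's variables survive. */
#define flush_range_fixed(start, pages)                               \
do {                                                                  \
        typeof(start) __flush_start = (start);                        \
        typeof(pages) __flush_pages = (pages);                        \
        while (__flush_pages > 0) {                                   \
                /* pretend to invalidate one page at __flush_start */ \
                __flush_start += PAGE_SIZE;                           \
                __flush_pages -= 1;                                   \
        }                                                             \
} while (0)

Swapping flush_range_fixed() into the earlier sketch leaves start and pages untouched after the flush, so notify_range(start, end) reports the exact four-page range instead of an empty one.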
