@@ -396,33 +396,35 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 #define __flush_tlb_range_op(op, start, pages, stride,			\
 				asid, tlb_level, tlbi_user, lpa2)	\
 do {									\
+	typeof(start) __flush_start = start;				\
+	typeof(pages) __flush_pages = pages;				\
 	int num = 0;							\
 	int scale = 3;							\
 	int shift = lpa2 ? 16 : PAGE_SHIFT;				\
 	unsigned long addr;						\
 									\
-	while (pages > 0) {						\
+	while (__flush_pages > 0) {					\
 		if (!system_supports_tlb_range() ||			\
-		    pages == 1 ||					\
-		    (lpa2 && start != ALIGN(start, SZ_64K))) {		\
-			addr = __TLBI_VADDR(start, asid);		\
+		    __flush_pages == 1 ||				\
+		    (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) {	\
+			addr = __TLBI_VADDR(__flush_start, asid);	\
 			__tlbi_level(op, addr, tlb_level);		\
 			if (tlbi_user)					\
 				__tlbi_user_level(op, addr, tlb_level);	\
-			start += stride;				\
-			pages -= stride >> PAGE_SHIFT;			\
+			__flush_start += stride;			\
+			__flush_pages -= stride >> PAGE_SHIFT;		\
 			continue;					\
 		}							\
 									\
-		num = __TLBI_RANGE_NUM(pages, scale);			\
+		num = __TLBI_RANGE_NUM(__flush_pages, scale);		\
 		if (num >= 0) {						\
-			addr = __TLBI_VADDR_RANGE(start >> shift, asid, \
+			addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
 						  scale, num, tlb_level); \
 			__tlbi(r##op, addr);				\
 			if (tlbi_user)					\
 				__tlbi_user(r##op, addr);		\
-			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
-			pages -= __TLBI_RANGE_PAGES(num, scale);	\
+			__flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
+			__flush_pages -= __TLBI_RANGE_PAGES(num, scale); \
 		}							\
 		scale--;						\
 	}								\
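The diff follows a standard macro-hygiene pattern: the `do { ... } while (0)` body copies the `start` and `pages` parameters into `typeof()`-typed locals (`__flush_start`, `__flush_pages`) and advances only the copies, so the loop no longer writes back into whatever lvalues the caller passed in. Below is a minimal sketch of the same pattern, assuming GCC/Clang `typeof`; the `consume_range()` macro and the page-size constant are illustrative names, not from the kernel.

```c
#include <stdio.h>

#define ILLUSTRATIVE_PAGE_SIZE 4096UL

/*
 * Hypothetical macro mirroring the pattern in the diff: shadow the
 * macro arguments in typeof()-typed locals so the caller's variables
 * are left untouched while the loop walks the range.
 */
#define consume_range(start, pages)					\
do {									\
	typeof(start) __start = (start);				\
	typeof(pages) __pages = (pages);				\
									\
	while (__pages > 0) {						\
		printf("flush addr 0x%lx\n", (unsigned long)__start);	\
		__start += ILLUSTRATIVE_PAGE_SIZE;			\
		__pages -= 1;						\
	}								\
} while (0)

int main(void)
{
	unsigned long base = 0x1000;
	unsigned long npages = 3;

	consume_range(base, npages);

	/* base and npages still hold their original values. */
	printf("base=0x%lx npages=%lu\n", base, npages);
	return 0;
}
```

Without the shadow copies, `base` and `npages` would come back modified after the macro expansion, and passing a non-lvalue expression as an argument would not even compile, since the body would try to assign to it.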