Skip to content

Commit 63fc66f

Browse files
howlett authored and akpm00 committed
ipc/shm, mm: drop do_vma_munmap()
The do_vma_munmap() wrapper existed for callers that didn't have a vma iterator and needed to check the vma mseal status prior to calling the underlying munmap(). All callers now use a vma iterator and since the mseal check has been moved to do_vmi_align_munmap() and the vmas are aligned, this function can just be called instead. do_vmi_align_munmap() can no longer be static as ipc/shm is using it and it is exported via the mm.h header. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Liam R. Howlett <[email protected]> Reviewed-by: Lorenzo Stoakes <[email protected]> Cc: Bert Karwatzki <[email protected]> Cc: Jeff Xu <[email protected]> Cc: Jiri Olsa <[email protected]> Cc: Kees Cook <[email protected]> Cc: Lorenzo Stoakes <[email protected]> Cc: Mark Brown <[email protected]> Cc: Matthew Wilcox <[email protected]> Cc: "Paul E. McKenney" <[email protected]> Cc: Paul Moore <[email protected]> Cc: Sidhartha Kumar <[email protected]> Cc: Suren Baghdasaryan <[email protected]> Cc: Vlastimil Babka <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 13d77e0 commit 63fc66f

File tree

5 files changed

+20
-43
lines changed

5 files changed

+20
-43
lines changed

include/linux/mm.h

+3-3
Original file line numberDiff line numberDiff line change
@@ -3287,14 +3287,14 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
32873287
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
32883288
unsigned long start, size_t len, struct list_head *uf,
32893289
bool unlock);
3290+
int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3291+
struct mm_struct *mm, unsigned long start,
3292+
unsigned long end, struct list_head *uf, bool unlock);
32903293
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
32913294
struct list_head *uf);
32923295
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
32933296

32943297
#ifdef CONFIG_MMU
3295-
extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3296-
unsigned long start, unsigned long end,
3297-
struct list_head *uf, bool unlock);
32983298
extern int __mm_populate(unsigned long addr, unsigned long len,
32993299
int ignore_errors);
33003300
static inline void mm_populate(unsigned long addr, unsigned long len)

ipc/shm.c

+4-4
Original file line numberDiff line numberDiff line change
@@ -1778,8 +1778,8 @@ long ksys_shmdt(char __user *shmaddr)
17781778
*/
17791779
file = vma->vm_file;
17801780
size = i_size_read(file_inode(vma->vm_file));
1781-
do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end,
1782-
NULL, false);
1781+
do_vmi_align_munmap(&vmi, vma, mm, vma->vm_start,
1782+
vma->vm_end, NULL, false);
17831783
/*
17841784
* We discovered the size of the shm segment, so
17851785
* break out of here and fall through to the next
@@ -1803,8 +1803,8 @@ long ksys_shmdt(char __user *shmaddr)
18031803
if ((vma->vm_ops == &shm_vm_ops) &&
18041804
((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
18051805
(vma->vm_file == file)) {
1806-
do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end,
1807-
NULL, false);
1806+
do_vmi_align_munmap(&vmi, vma, mm, vma->vm_start,
1807+
vma->vm_end, NULL, false);
18081808
}
18091809

18101810
vma = vma_next(&vmi);

mm/mmap.c

+6-27
Original file line numberDiff line numberDiff line change
@@ -169,11 +169,12 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
169169
goto out; /* mapping intersects with an existing non-brk vma. */
170170
/*
171171
* mm->brk must be protected by write mmap_lock.
172-
* do_vma_munmap() will drop the lock on success, so update it
173-
* before calling do_vma_munmap().
172+
* do_vmi_align_munmap() will drop the lock on success, so
173+
* update it before calling do_vma_munmap().
174174
*/
175175
mm->brk = brk;
176-
if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
176+
if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
177+
/* unlock = */ true))
177178
goto out;
178179

179180
goto success_unlocked;
@@ -1479,9 +1480,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
14791480
vma->vm_file = get_file(file);
14801481
/*
14811482
* call_mmap() may map PTE, so ensure there are no existing PTEs
1482-
* call the vm_ops close function if one exists.
1483+
* and call the vm_ops close function if one exists.
14831484
*/
1484-
vms_clean_up_area(&vms, &mas_detach, true);
1485+
vms_clean_up_area(&vms, &mas_detach);
14851486
error = call_mmap(file, vma);
14861487
if (error)
14871488
goto unmap_and_free_vma;
@@ -1744,28 +1745,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
17441745
return ret;
17451746
}
17461747

1747-
/*
1748-
* do_vma_munmap() - Unmap a full or partial vma.
1749-
* @vmi: The vma iterator pointing at the vma
1750-
* @vma: The first vma to be munmapped
1751-
* @start: the start of the address to unmap
1752-
* @end: The end of the address to unmap
1753-
* @uf: The userfaultfd list_head
1754-
* @unlock: Drop the lock on success
1755-
*
1756-
* unmaps a VMA mapping when the vma iterator is already in position.
1757-
* Does not handle alignment.
1758-
*
1759-
* Return: 0 on success drops the lock of so directed, error on failure and will
1760-
* still hold the lock.
1761-
*/
1762-
int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
1763-
unsigned long start, unsigned long end, struct list_head *uf,
1764-
bool unlock)
1765-
{
1766-
return do_vmi_align_munmap(vmi, vma, vma->vm_mm, start, end, uf, unlock);
1767-
}
1768-
17691748
/*
17701749
* do_brk_flags() - Increase the brk vma if the flags match.
17711750
* @vmi: The vma iterator

mm/vma.c

+6-6
Original file line numberDiff line numberDiff line change
@@ -658,8 +658,8 @@ static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
658658
*/
659659
mas_set(mas_detach, 1);
660660
lru_add_drain();
661-
tlb_gather_mmu(&tlb, vms->mm);
662-
update_hiwater_rss(vms->mm);
661+
tlb_gather_mmu(&tlb, vms->vma->vm_mm);
662+
update_hiwater_rss(vms->vma->vm_mm);
663663
unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
664664
vms->vma_count, mm_wr_locked);
665665

@@ -672,14 +672,14 @@ static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
672672
}
673673

674674
void vms_clean_up_area(struct vma_munmap_struct *vms,
675-
struct ma_state *mas_detach, bool mm_wr_locked)
675+
struct ma_state *mas_detach)
676676
{
677677
struct vm_area_struct *vma;
678678

679679
if (!vms->nr_pages)
680680
return;
681681

682-
vms_clear_ptes(vms, mas_detach, mm_wr_locked);
682+
vms_clear_ptes(vms, mas_detach, true);
683683
mas_set(mas_detach, 0);
684684
mas_for_each(mas_detach, vma, ULONG_MAX)
685685
if (vma->vm_ops && vma->vm_ops->close)
@@ -702,7 +702,7 @@ void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
702702
struct vm_area_struct *vma;
703703
struct mm_struct *mm;
704704

705-
mm = vms->mm;
705+
mm = current->mm;
706706
mm->map_count -= vms->vma_count;
707707
mm->locked_vm -= vms->locked_vm;
708708
if (vms->unlock)
@@ -770,7 +770,7 @@ int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
770770
* its limit temporarily, to help free resources as expected.
771771
*/
772772
if (vms->end < vms->vma->vm_end &&
773-
vms->mm->map_count >= sysctl_max_map_count)
773+
vms->vma->vm_mm->map_count >= sysctl_max_map_count)
774774
goto map_count_exceeded;
775775

776776
/* Don't bother splitting the VMA if we can't unmap it anyway */

mm/vma.h

+1-3
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,6 @@ struct unlink_vma_file_batch {
3131
*/
3232
struct vma_munmap_struct {
3333
struct vma_iterator *vmi;
34-
struct mm_struct *mm;
3534
struct vm_area_struct *vma; /* The first vma to munmap */
3635
struct vm_area_struct *prev; /* vma before the munmap area */
3736
struct vm_area_struct *next; /* vma after the munmap area */
@@ -114,7 +113,6 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms,
114113
unsigned long start, unsigned long end, struct list_head *uf,
115114
bool unlock)
116115
{
117-
vms->mm = current->mm;
118116
vms->vmi = vmi;
119117
vms->vma = vma;
120118
if (vma) {
@@ -142,7 +140,7 @@ void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
142140
struct ma_state *mas_detach);
143141

144142
void vms_clean_up_area(struct vma_munmap_struct *vms,
145-
struct ma_state *mas_detach, bool mm_wr_locked);
143+
struct ma_state *mas_detach);
146144

147145
/*
148146
* reattach_vmas() - Undo any munmap work and free resources

0 commit comments

Comments
 (0)