Skip to content

Commit 4e19fd9

Browse files
committed
Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton: "11 fixes"

* emailed patches from Andrew Morton <[email protected]>:
  - .mailmap: add Christophe Ricard
  - Make CONFIG_FHANDLE default y
  - mm/page_isolation.c: fix the function comments
  - oom, oom_reaper: do not enqueue task if it is on the oom_reaper_list head
  - mm/page_isolation: fix tracepoint to mirror check function behavior
  - mm/rmap: batched invalidations should use existing api
  - x86/mm: TLB_REMOTE_SEND_IPI should count pages
  - mm: fix invalid node in alloc_migrate_target()
  - include/linux/huge_mm.h: return NULL instead of false for pmd_trans_huge_lock()
  - mm, kasan: fix compilation for CONFIG_SLAB
  - MAINTAINERS: orangefs mailing list is subscribers-only
2 parents 82d2a34 + 394532e commit 4e19fd9

File tree

11 files changed

+34
-42
lines changed

11 files changed

+34
-42
lines changed

Diff for: .mailmap

+1
Original file line number | Diff line number | Diff line change
@@ -33,6 +33,7 @@ Björn Steinbrink <[email protected]>
3333
Brian Avery <[email protected]>
3434
Brian King <[email protected]>
3535
Christoph Hellwig <[email protected]>
36+
Christophe Ricard <[email protected]>
3637
Corey Minyard <[email protected]>
3738
Damian Hobson-Garcia <[email protected]>
3839
David Brownell <[email protected]>

Diff for: MAINTAINERS

+1-1
Original file line number | Diff line number | Diff line change
@@ -8253,7 +8253,7 @@ F: Documentation/filesystems/overlayfs.txt
82538253

82548254
ORANGEFS FILESYSTEM
82558255
M: Mike Marshall <[email protected]>
8256-
8256+
L: [email protected] (subscribers-only)
82578257
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux.git
82588258
S: Supported
82598259
F: fs/orangefs/

Diff for: arch/x86/include/asm/tlbflush.h

-6
Original file line number | Diff line number | Diff line change
@@ -319,12 +319,6 @@ static inline void reset_lazy_tlbstate(void)
319319

320320
#endif /* SMP */
321321

322-
/* Not inlined due to inc_irq_stat not being defined yet */
323-
#define flush_tlb_local() { \
324-
inc_irq_stat(irq_tlb_count); \
325-
local_flush_tlb(); \
326-
}
327-
328322
#ifndef CONFIG_PARAVIRT
329323
#define flush_tlb_others(mask, mm, start, end) \
330324
native_flush_tlb_others(mask, mm, start, end)

Diff for: arch/x86/mm/tlb.c

+10-4
Original file line number | Diff line number | Diff line change
@@ -104,10 +104,8 @@ static void flush_tlb_func(void *info)
104104

105105
inc_irq_stat(irq_tlb_count);
106106

107-
if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
107+
if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
108108
return;
109-
if (!f->flush_end)
110-
f->flush_end = f->flush_start + PAGE_SIZE;
111109

112110
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
113111
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
@@ -135,12 +133,20 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
135133
unsigned long end)
136134
{
137135
struct flush_tlb_info info;
136+
137+
if (end == 0)
138+
end = start + PAGE_SIZE;
138139
info.flush_mm = mm;
139140
info.flush_start = start;
140141
info.flush_end = end;
141142

142143
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
143-
trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
144+
if (end == TLB_FLUSH_ALL)
145+
trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
146+
else
147+
trace_tlb_flush(TLB_REMOTE_SEND_IPI,
148+
(end - start) >> PAGE_SHIFT);
149+
144150
if (is_uv_system()) {
145151
unsigned int cpu;
146152

Diff for: include/linux/huge_mm.h

+1-1
Original file line number | Diff line number | Diff line change
@@ -127,7 +127,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
127127
if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
128128
return __pmd_trans_huge_lock(pmd, vma);
129129
else
130-
return false;
130+
return NULL;
131131
}
132132
static inline int hpage_nr_pages(struct page *page)
133133
{

Diff for: include/trace/events/page_isolation.h

+1-1
Original file line number | Diff line number | Diff line change
@@ -29,7 +29,7 @@ TRACE_EVENT(test_pages_isolated,
2929

3030
TP_printk("start_pfn=0x%lx end_pfn=0x%lx fin_pfn=0x%lx ret=%s",
3131
__entry->start_pfn, __entry->end_pfn, __entry->fin_pfn,
32-
__entry->end_pfn == __entry->fin_pfn ? "success" : "fail")
32+
__entry->end_pfn <= __entry->fin_pfn ? "success" : "fail")
3333
);
3434

3535
#endif /* _TRACE_PAGE_ISOLATION_H */

Diff for: init/Kconfig

+2-1
Original file line number | Diff line number | Diff line change
@@ -272,8 +272,9 @@ config CROSS_MEMORY_ATTACH
272272
See the man page for more details.
273273

274274
config FHANDLE
275-
bool "open by fhandle syscalls"
275+
bool "open by fhandle syscalls" if EXPERT
276276
select EXPORTFS
277+
default y
277278
help
278279
If you say Y here, a user level program will be able to map
279280
file names to handle and then later use the handle for

Diff for: mm/kasan/kasan.c

+1-1
Original file line number | Diff line number | Diff line change
@@ -498,7 +498,7 @@ void kasan_slab_free(struct kmem_cache *cache, void *object)
498498
struct kasan_alloc_meta *alloc_info =
499499
get_alloc_info(cache, object);
500500
alloc_info->state = KASAN_STATE_FREE;
501-
set_track(&free_info->track);
501+
set_track(&free_info->track, GFP_NOWAIT);
502502
}
503503
#endif
504504

Diff for: mm/oom_kill.c

+5-1
Original file line number | Diff line number | Diff line change
@@ -547,7 +547,11 @@ static int oom_reaper(void *unused)
547547

548548
static void wake_oom_reaper(struct task_struct *tsk)
549549
{
550-
if (!oom_reaper_th || tsk->oom_reaper_list)
550+
if (!oom_reaper_th)
551+
return;
552+
553+
/* tsk is already queued? */
554+
if (tsk == oom_reaper_list || tsk->oom_reaper_list)
551555
return;
552556

553557
get_task_struct(tsk);

Diff for: mm/page_isolation.c

+5-5
Original file line number | Diff line number | Diff line change
@@ -215,7 +215,7 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
215215
* all pages in [start_pfn...end_pfn) must be in the same zone.
216216
* zone->lock must be held before call this.
217217
*
218-
* Returns 1 if all pages in the range are isolated.
218+
* Returns the last tested pfn.
219219
*/
220220
static unsigned long
221221
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
@@ -289,11 +289,11 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
289289
* now as a simple work-around, we use the next node for destination.
290290
*/
291291
if (PageHuge(page)) {
292-
nodemask_t src = nodemask_of_node(page_to_nid(page));
293-
nodemask_t dst;
294-
nodes_complement(dst, src);
292+
int node = next_online_node(page_to_nid(page));
293+
if (node == MAX_NUMNODES)
294+
node = first_online_node;
295295
return alloc_huge_page_node(page_hstate(compound_head(page)),
296-
next_node(page_to_nid(page), dst));
296+
node);
297297
}
298298

299299
if (PageHighMem(page))

Diff for: mm/rmap.c

+7-21
Original file line number | Diff line number | Diff line change
@@ -569,19 +569,6 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
569569
}
570570

571571
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
572-
static void percpu_flush_tlb_batch_pages(void *data)
573-
{
574-
/*
575-
* All TLB entries are flushed on the assumption that it is
576-
* cheaper to flush all TLBs and let them be refilled than
577-
* flushing individual PFNs. Note that we do not track mm's
578-
* to flush as that might simply be multiple full TLB flushes
579-
* for no gain.
580-
*/
581-
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
582-
flush_tlb_local();
583-
}
584-
585572
/*
586573
* Flush TLB entries for recently unmapped pages from remote CPUs. It is
587574
* important if a PTE was dirty when it was unmapped that it's flushed
@@ -598,15 +585,14 @@ void try_to_unmap_flush(void)
598585

599586
cpu = get_cpu();
600587

601-
trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
602-
603-
if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
604-
percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
605-
606-
if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
607-
smp_call_function_many(&tlb_ubc->cpumask,
608-
percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
588+
if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
589+
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
590+
local_flush_tlb();
591+
trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
609592
}
593+
594+
if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
595+
flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
610596
cpumask_clear(&tlb_ubc->cpumask);
611597
tlb_ubc->flush_required = false;
612598
tlb_ubc->writable = false;

0 commit comments

Comments
 (0)