
Commit 310959e

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc fixes from Benjamin Herrenschmidt:
 "It looks like my rewrite of our lazy irq scheme is still exposing
  "interesting" issues left and right.  The previous fixes are now
  causing an occasional BUG_ON to trigger (which this patch turns into
  a WARN_ON while at it), due to another disconnect of the lazy irq
  state vs the processor state in the idle loop on pseries and cell.
  This should fix it properly once and for all by moving the nasty
  code to a common helper function.

  There's also a couple more fixes for some debug stuff that didn't
  build (and that helped resolve those problems, so it's worth
  having), along with a compile fix for newer gcc's."

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  tty/hvc_opal: Fix debug function name
  powerpc/numa: Avoid stupid uninitialized warning from gcc
  powerpc: Fix build of some debug irq code
  powerpc: More fixes for lazy IRQ vs. idle
2 parents bc51b0c + 50fb31c

6 files changed: +69 -17

arch/powerpc/include/asm/hw_irq.h

+4 -2

@@ -86,8 +86,8 @@ static inline bool arch_irqs_disabled(void)
 }
 
 #ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory");
-#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory");
+#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory")
+#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory")
 #else
 #define __hard_irq_enable() __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
 #define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1)
@@ -125,6 +125,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
         return !regs->softe;
 }
 
+extern bool prep_irq_for_idle(void);
+
 #else /* CONFIG_PPC64 */
 
 #define SET_MSR_EE(x) mtmsr(x)
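
Why the trailing semicolons had to go: with the ';' baked into the macro,
a call site that itself ends in ';' expands to two statements, and the
stray empty statement breaks if/else constructs (this is the compile fix
for newer gcc's mentioned in the pull message). A minimal sketch, with
hypothetical macro names rather than the kernel ones:

#define ENABLE_WITH_SEMI() do_enable();  /* trailing ';' baked in */
#define ENABLE_NO_SEMI()   do_enable()   /* caller supplies the ';' */

void do_enable(void);
void do_other(void);

void example(int cond)
{
        if (cond)
                ENABLE_NO_SEMI();  /* expands to exactly one statement */
        else
                do_other();

        /*
         * The same if/else written with ENABLE_WITH_SEMI() would expand
         * to "do_enable();;" -- the extra empty statement closes the if
         * body, leaving the "else" unmatched and the build broken.
         */
}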

arch/powerpc/kernel/irq.c

+47 -1

@@ -229,7 +229,7 @@ notrace void arch_local_irq_restore(unsigned long en)
          */
         if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
                 __hard_irq_disable();
-#ifdef CONFIG_TRACE_IRQFLAG
+#ifdef CONFIG_TRACE_IRQFLAGS
         else {
                 /*
                  * We should already be hard disabled here. We had bugs
@@ -286,6 +286,52 @@ void notrace restore_interrupts(void)
                 __hard_irq_enable();
 }
 
+/*
+ * This is a helper to use when about to go into idle low-power
+ * when the latter has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). The function
+ * will return whether to enter power save or just return.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+        /*
+         * First we need to hard disable to ensure no interrupt
+         * occurs before we effectively enter the low power state
+         */
+        hard_irq_disable();
+
+        /*
+         * If anything happened while we were soft-disabled,
+         * we return now and do not enter the low power state.
+         */
+        if (lazy_irq_pending())
+                return false;
+
+        /* Tell lockdep we are about to re-enable */
+        trace_hardirqs_on();
+
+        /*
+         * Mark interrupts as soft-enabled and clear the
+         * PACA_IRQ_HARD_DIS from the pending mask since we
+         * are about to hard enable as well as a side effect
+         * of entering the low power state.
+         */
+        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+        local_paca->soft_enabled = 1;
+
+        /* Tell the caller to enter the low power state */
+        return true;
+}
+
 #endif /* CONFIG_PPC64 */
 
 int arch_show_interrupts(struct seq_file *p, int prec)
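
For reference, the intended call pattern, condensed from the pseries and
cell conversions below: a ppc_md.power_save hook runs with interrupts
soft-disabled, calls the helper, and only enters low power if it returns
true. This is a sketch, with enter_low_power_state() standing in for the
platform's real idle entry (H_CEDE, the Cell pause(0) sequence, ...):

/*
 * Sketch of a ppc_md.power_save hook built on the new helper.
 * enter_low_power_state() is a stand-in, not a real kernel call.
 */
static void example_power_save(void)
{
        /*
         * prep_irq_for_idle() hard-disables, bails out if an interrupt
         * came in while we were soft-disabled, and otherwise fixes up
         * the lazy state so the hard enable done by the low-power entry
         * is consistent with what lockdep and the PACA believe.
         */
        if (!prep_irq_for_idle())
                return;

        /* Re-enables MSR[EE] as a side effect (e.g. H_CEDE under pHyp) */
        enter_low_power_state();
}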

arch/powerpc/mm/numa.c

+1 -1

@@ -639,7 +639,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
         unsigned int n, rc, ranges, is_kexec_kdump = 0;
         unsigned long lmb_size, base, size, sz;
         int nid;
-        struct assoc_arrays aa;
+        struct assoc_arrays aa = { .arrays = NULL };
 
         n = of_get_drconf_memory(memory, &dm);
         if (!n)
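
The initializer silences a gcc maybe-uninitialized false positive: aa is
written and read only on the kexec/kdump paths, but gcc's warning pass
cannot prove the two identical conditions always agree. A reduced sketch
of the pattern (illustrative only, not the actual parse_drconf_memory()
code):

struct assoc_arrays { const int *arrays; };

static const int some_table[] = { 1, 2, 3 };

static int lookup(int is_kexec_kdump)
{
        /*
         * Without the designated initializer, gcc may warn that
         * aa.arrays is used uninitialized: it is set and read only
         * when is_kexec_kdump is true, but the flow analysis cannot
         * connect the two branches.
         */
        struct assoc_arrays aa = { .arrays = NULL };

        if (is_kexec_kdump)
                aa.arrays = some_table;

        if (is_kexec_kdump)
                return aa.arrays[0];

        return 0;
}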

arch/powerpc/platforms/cell/pervasive.c

+6 -5

@@ -42,11 +42,9 @@ static void cbe_power_save(void)
 {
         unsigned long ctrl, thread_switch_control;
 
-        /*
-         * We need to hard disable interrupts, the local_irq_enable() done by
-         * our caller upon return will hard re-enable.
-         */
-        hard_irq_disable();
+        /* Ensure our interrupt state is properly tracked */
+        if (!prep_irq_for_idle())
+                return;
 
         ctrl = mfspr(SPRN_CTRLF);
 
@@ -81,6 +79,9 @@ static void cbe_power_save(void)
          */
         ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
         mtspr(SPRN_CTRLT, ctrl);
+
+        /* Re-enable interrupts in MSR */
+        __hard_irq_enable();
 }
 
 static int cbe_system_reset_exception(struct pt_regs *regs)

arch/powerpc/platforms/pseries/processor_idle.c

+10 -7

@@ -99,15 +99,18 @@ static int snooze_loop(struct cpuidle_device *dev,
 static void check_and_cede_processor(void)
 {
         /*
-         * Interrupts are soft-disabled at this point,
-         * but not hard disabled. So an interrupt might have
-         * occurred before entering NAP, and would be potentially
-         * lost (edge events, decrementer events, etc...) unless
-         * we first hard disable then check.
+         * Ensure our interrupt state is properly tracked,
+         * also checks if no interrupt has occurred while we
+         * were soft-disabled
          */
-        hard_irq_disable();
-        if (!lazy_irq_pending())
+        if (prep_irq_for_idle()) {
                 cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+                /* Ensure that H_CEDE returns with IRQs on */
+                if (WARN_ON(!(mfmsr() & MSR_EE)))
+                        __hard_irq_enable();
+#endif
+        }
 }
 
 static int dedicated_cede_loop(struct cpuidle_device *dev,

drivers/tty/hvc/hvc_opal.c

+1 -1

@@ -401,7 +401,7 @@ void __init hvc_opal_init_early(void)
 }
 
 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW
-void __init udbg_init_debug_opal(void)
+void __init udbg_init_debug_opal_raw(void)
 {
         u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
         hvc_opal_privs[index] = &hvc_opal_boot_priv;
