Skip to content

Commit c825dda

Browse files
author
Russell King
committed
Merge branch 'for_3_2/for-rmk/arm_cpu_pm' of git://gitorious.org/omap-sw-develoment/linux-omap-dev into devel-stable
2 parents b0a37dc + 8fb5428 commit c825dda

File tree

11 files changed

+578
-9
lines changed

11 files changed

+578
-9
lines changed

arch/arm/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@ config ARM
2929
select HAVE_GENERIC_HARDIRQS
3030
select HAVE_SPARSE_IRQ
3131
select GENERIC_IRQ_SHOW
32+
select CPU_PM if (SUSPEND || CPU_IDLE)
3233
help
3334
The ARM series is a line of low-power-consumption RISC chip designs
3435
licensed by ARM Ltd and targeted at embedded applications and

arch/arm/common/gic.c

Lines changed: 188 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
#include <linux/kernel.h>
2727
#include <linux/list.h>
2828
#include <linux/smp.h>
29+
#include <linux/cpu_pm.h>
2930
#include <linux/cpumask.h>
3031
#include <linux/io.h>
3132

@@ -276,6 +277,8 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
276277
if (gic_irqs > 1020)
277278
gic_irqs = 1020;
278279

280+
gic->gic_irqs = gic_irqs;
281+
279282
/*
280283
* Set all global interrupts to be level triggered, active low.
281284
*/
@@ -343,6 +346,189 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
343346
writel_relaxed(1, base + GIC_CPU_CTRL);
344347
}
345348

349+
#ifdef CONFIG_CPU_PM
350+
/*
351+
* Saves the GIC distributor registers during suspend or idle. Must be called
352+
* with interrupts disabled but before powering down the GIC. After calling
353+
* this function, no interrupts will be delivered by the GIC, and another
354+
* platform-specific wakeup source must be enabled.
355+
*/
356+
static void gic_dist_save(unsigned int gic_nr)
357+
{
358+
unsigned int gic_irqs;
359+
void __iomem *dist_base;
360+
int i;
361+
362+
if (gic_nr >= MAX_GIC_NR)
363+
BUG();
364+
365+
gic_irqs = gic_data[gic_nr].gic_irqs;
366+
dist_base = gic_data[gic_nr].dist_base;
367+
368+
if (!dist_base)
369+
return;
370+
371+
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
372+
gic_data[gic_nr].saved_spi_conf[i] =
373+
readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
374+
375+
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
376+
gic_data[gic_nr].saved_spi_target[i] =
377+
readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
378+
379+
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
380+
gic_data[gic_nr].saved_spi_enable[i] =
381+
readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
382+
}
383+
384+
/*
385+
* Restores the GIC distributor registers during resume or when coming out of
386+
* idle. Must be called before enabling interrupts. If a level interrupt
387+
* that occurred while the GIC was suspended is still present, it will be
388+
* handled normally, but any edge interrupts that occurred will not be seen by
389+
* the GIC and need to be handled by the platform-specific wakeup source.
390+
*/
391+
static void gic_dist_restore(unsigned int gic_nr)
392+
{
393+
unsigned int gic_irqs;
394+
unsigned int i;
395+
void __iomem *dist_base;
396+
397+
if (gic_nr >= MAX_GIC_NR)
398+
BUG();
399+
400+
gic_irqs = gic_data[gic_nr].gic_irqs;
401+
dist_base = gic_data[gic_nr].dist_base;
402+
403+
if (!dist_base)
404+
return;
405+
406+
writel_relaxed(0, dist_base + GIC_DIST_CTRL);
407+
408+
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
409+
writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
410+
dist_base + GIC_DIST_CONFIG + i * 4);
411+
412+
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
413+
writel_relaxed(0xa0a0a0a0,
414+
dist_base + GIC_DIST_PRI + i * 4);
415+
416+
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
417+
writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
418+
dist_base + GIC_DIST_TARGET + i * 4);
419+
420+
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
421+
writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
422+
dist_base + GIC_DIST_ENABLE_SET + i * 4);
423+
424+
writel_relaxed(1, dist_base + GIC_DIST_CTRL);
425+
}
426+
427+
static void gic_cpu_save(unsigned int gic_nr)
428+
{
429+
int i;
430+
u32 *ptr;
431+
void __iomem *dist_base;
432+
void __iomem *cpu_base;
433+
434+
if (gic_nr >= MAX_GIC_NR)
435+
BUG();
436+
437+
dist_base = gic_data[gic_nr].dist_base;
438+
cpu_base = gic_data[gic_nr].cpu_base;
439+
440+
if (!dist_base || !cpu_base)
441+
return;
442+
443+
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
444+
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
445+
ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
446+
447+
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
448+
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
449+
ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
450+
451+
}
452+
453+
static void gic_cpu_restore(unsigned int gic_nr)
454+
{
455+
int i;
456+
u32 *ptr;
457+
void __iomem *dist_base;
458+
void __iomem *cpu_base;
459+
460+
if (gic_nr >= MAX_GIC_NR)
461+
BUG();
462+
463+
dist_base = gic_data[gic_nr].dist_base;
464+
cpu_base = gic_data[gic_nr].cpu_base;
465+
466+
if (!dist_base || !cpu_base)
467+
return;
468+
469+
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
470+
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
471+
writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
472+
473+
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
474+
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
475+
writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
476+
477+
for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
478+
writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
479+
480+
writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
481+
writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
482+
}
483+
484+
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
485+
{
486+
int i;
487+
488+
for (i = 0; i < MAX_GIC_NR; i++) {
489+
switch (cmd) {
490+
case CPU_PM_ENTER:
491+
gic_cpu_save(i);
492+
break;
493+
case CPU_PM_ENTER_FAILED:
494+
case CPU_PM_EXIT:
495+
gic_cpu_restore(i);
496+
break;
497+
case CPU_CLUSTER_PM_ENTER:
498+
gic_dist_save(i);
499+
break;
500+
case CPU_CLUSTER_PM_ENTER_FAILED:
501+
case CPU_CLUSTER_PM_EXIT:
502+
gic_dist_restore(i);
503+
break;
504+
}
505+
}
506+
507+
return NOTIFY_OK;
508+
}
509+
510+
static struct notifier_block gic_notifier_block = {
511+
.notifier_call = gic_notifier,
512+
};
513+
514+
static void __init gic_pm_init(struct gic_chip_data *gic)
515+
{
516+
gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
517+
sizeof(u32));
518+
BUG_ON(!gic->saved_ppi_enable);
519+
520+
gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
521+
sizeof(u32));
522+
BUG_ON(!gic->saved_ppi_conf);
523+
524+
cpu_pm_register_notifier(&gic_notifier_block);
525+
}
526+
#else
527+
static void __init gic_pm_init(struct gic_chip_data *gic)
528+
{
529+
}
530+
#endif
531+
346532
void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
347533
void __iomem *dist_base, void __iomem *cpu_base)
348534
{
@@ -358,8 +544,10 @@ void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
358544
if (gic_nr == 0)
359545
gic_cpu_base_addr = cpu_base;
360546

547+
gic_chip.flags |= gic_arch_extn.flags;
361548
gic_dist_init(gic, irq_start);
362549
gic_cpu_init(gic);
550+
gic_pm_init(gic);
363551
}
364552

365553
void __cpuinit gic_secondary_init(unsigned int gic_nr)

arch/arm/include/asm/hardware/gic.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,14 @@ struct gic_chip_data {
4646
unsigned int irq_offset;
4747
void __iomem *dist_base;
4848
void __iomem *cpu_base;
49+
#ifdef CONFIG_CPU_PM
50+
u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
51+
u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
52+
u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
53+
u32 __percpu *saved_ppi_enable;
54+
u32 __percpu *saved_ppi_conf;
55+
#endif
56+
unsigned int gic_irqs;
4957
};
5058
#endif
5159

arch/arm/include/asm/mach/map.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@ struct map_desc {
2929
#define MT_MEMORY_NONCACHED 11
3030
#define MT_MEMORY_DTCM 12
3131
#define MT_MEMORY_ITCM 13
32+
#define MT_MEMORY_SO 14
3233

3334
#ifdef CONFIG_MMU
3435
extern void iotable_init(struct map_desc *, int);

arch/arm/include/asm/pgtable.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -232,6 +232,9 @@ extern pgprot_t pgprot_kernel;
232232
#define pgprot_writecombine(prot) \
233233
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
234234

235+
#define pgprot_stronglyordered(prot) \
236+
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
237+
235238
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
236239
#define pgprot_dmacoherent(prot) \
237240
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)

arch/arm/mm/mmu.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -273,6 +273,14 @@ static struct mem_type mem_types[] = {
273273
.prot_l1 = PMD_TYPE_TABLE,
274274
.domain = DOMAIN_KERNEL,
275275
},
276+
[MT_MEMORY_SO] = {
277+
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
278+
L_PTE_MT_UNCACHED,
279+
.prot_l1 = PMD_TYPE_TABLE,
280+
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
281+
PMD_SECT_UNCACHED | PMD_SECT_XN,
282+
.domain = DOMAIN_KERNEL,
283+
},
276284
};
277285

278286
const struct mem_type *get_mem_type(unsigned int type)

arch/arm/vfp/vfpmodule.c

Lines changed: 22 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
#include <linux/module.h>
1212
#include <linux/types.h>
1313
#include <linux/cpu.h>
14+
#include <linux/cpu_pm.h>
1415
#include <linux/kernel.h>
1516
#include <linux/notifier.h>
1617
#include <linux/signal.h>
@@ -68,7 +69,7 @@ static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
6869
/*
6970
* Force a reload of the VFP context from the thread structure. We do
7071
* this by ensuring that access to the VFP hardware is disabled, and
71-
* clear last_VFP_context. Must be called from non-preemptible context.
72+
* clear vfp_current_hw_state. Must be called from non-preemptible context.
7273
*/
7374
static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
7475
{
@@ -436,9 +437,7 @@ static void vfp_enable(void *unused)
436437
set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
437438
}
438439

439-
#ifdef CONFIG_PM
440-
#include <linux/syscore_ops.h>
441-
440+
#ifdef CONFIG_CPU_PM
442441
static int vfp_pm_suspend(void)
443442
{
444443
struct thread_info *ti = current_thread_info();
@@ -468,19 +467,33 @@ static void vfp_pm_resume(void)
468467
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
469468
}
470469

471-
static struct syscore_ops vfp_pm_syscore_ops = {
472-
.suspend = vfp_pm_suspend,
473-
.resume = vfp_pm_resume,
470+
static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
471+
void *v)
472+
{
473+
switch (cmd) {
474+
case CPU_PM_ENTER:
475+
vfp_pm_suspend();
476+
break;
477+
case CPU_PM_ENTER_FAILED:
478+
case CPU_PM_EXIT:
479+
vfp_pm_resume();
480+
break;
481+
}
482+
return NOTIFY_OK;
483+
}
484+
485+
static struct notifier_block vfp_cpu_pm_notifier_block = {
486+
.notifier_call = vfp_cpu_pm_notifier,
474487
};
475488

476489
static void vfp_pm_init(void)
477490
{
478-
register_syscore_ops(&vfp_pm_syscore_ops);
491+
cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
479492
}
480493

481494
#else
482495
static inline void vfp_pm_init(void) { }
483-
#endif /* CONFIG_PM */
496+
#endif /* CONFIG_CPU_PM */
484497

485498
/*
486499
* Ensure that the VFP state stored in 'thread->vfpstate' is up to date

0 commit comments

Comments
 (0)