/*
 * Copyright (C) 2002 ARM Ltd.
 * Copyright (C) 2008 STMicroelectronics.
 * Copyright (C) 2009 ST-Ericsson.
 * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
 *
 * This file is based on the ARM RealView platform code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/hardware/gic.h>
#include <asm/smp_scu.h>
#include <mach/hardware.h>
#include <mach/setup.h>

/* This is called from headsmp.S to wake up the secondary core */
extern void u8500_secondary_startup(void);

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int pen_release = -1;

/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not. This is necessary for the hotplug code to work reliably.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
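	/*
	 * A core waiting in the pen may not be taking part in coherency
	 * yet, so push the update all the way out: flush the line from
	 * the inner (L1) data cache and clean it from the outer cache.
	 */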
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}

static void __iomem *scu_base_addr(void)
{
	if (cpu_is_u5500())
		return __io_address(U5500_SCU_BASE);
	else if (cpu_is_u8500())
		return __io_address(U8500_SCU_BASE);
	else
		ux500_unknown_soc();
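
	/*
	 * Presumably not reached: ux500_unknown_soc() is expected to stop
	 * the boot on an unrecognised SoC, so the NULL return below mostly
	 * keeps the compiler happy.
	 */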
	return NULL;
}

static DEFINE_SPINLOCK(boot_lock);

void __cpuinit platform_secondary_init(unsigned int cpu)
{
	/*
	 * if any interrupts are already enabled for the primary
	 * core (e.g. timer irq), then they will not have been enabled
	 * for us: do so
	 */
	gic_secondary_init(0);

	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
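	/*
	 * boot_secondary() on the boot CPU holds boot_lock for the whole
	 * release sequence, so this lock/unlock pair simply waits for it
	 * to finish before we carry on.
	 */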
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 */
	write_pen_release(cpu_logical_map(cpu));
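
	/*
	 * Kick the secondary with SGI 1 so it notices the change to
	 * pen_release; the interrupt also acts as a wake-up event if the
	 * core is still sitting in WFE/WFI.
	 */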
	gic_raise_softirq(cpumask_of(cpu), 1);
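
	/*
	 * Give the secondary up to one second to come out of the pen;
	 * it resets pen_release to -1 in platform_secondary_init().
	 */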
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		if (pen_release == -1)
			break;
	}

	/*
	 * now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
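
	/*
	 * pen_release still holding the CPU number means the secondary
	 * never checked in within the timeout, so report the boot as
	 * failed.
	 */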
	return pen_release != -1 ? -ENOSYS : 0;
}

static void __init wakeup_secondary(void)
{
	void __iomem *backupram;

	if (cpu_is_u5500())
		backupram = __io_address(U5500_BACKUPRAM0_BASE);
	else if (cpu_is_u8500())
		backupram = __io_address(U8500_BACKUPRAM0_BASE);
	else
		ux500_unknown_soc();

	/*
	 * write the address of secondary startup into the backup ram register
	 * at offset 0x1FF4, then write the magic number 0xA1FEED01 to the
	 * backup ram register at offset 0x1FF0, which is what boot rom code
	 * is waiting for. This wakes up the secondary core from WFE.
	 */
#define UX500_CPU1_JUMPADDR_OFFSET 0x1FF4
	__raw_writel(virt_to_phys(u8500_secondary_startup),
		     backupram + UX500_CPU1_JUMPADDR_OFFSET);

#define UX500_CPU1_WAKEMAGIC_OFFSET 0x1FF0
	__raw_writel(0xA1FEED01,
		     backupram + UX500_CPU1_WAKEMAGIC_OFFSET);

	/* make sure write buffer is drained */
	mb();
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	ncores = scu_base ? scu_get_core_count(scu_base) : 1;

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
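
	/* Cross-CPU calls (IPIs) on this platform go through the GIC. */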
	set_smp_cross_call(gic_raise_softirq);
}

void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
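	/*
	 * Turn on coherency in the Snoop Control Unit, then hand the
	 * secondary its entry point via backup RAM (see wakeup_secondary()).
	 */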
	scu_enable(scu_base_addr());
	wakeup_secondary();
}