ARM: uniphier: remove SoC-specific SMP code

The UniPhier architecture (32bit) switched over to PSCI.  Remove
the SoC-specific SMP operations.

Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Author: Masahiro Yamada <yamada.masahiro@socionext.com>
Date:   2016-08-10 20:00:48 +09:00
Commit: dd34b11566
Parent: 29b4817d40

5 changed files with 6 additions and 331 deletions
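Note on the replacement: with PSCI, secondary CPUs are started by the firmware through the CPU_ON call, so the trampoline, SMP-control and SCU handling removed below needs no SoC-specific substitute in the kernel. A simplified sketch of how the common ARM PSCI SMP path boots a secondary CPU (modelled on arch/arm/kernel/psci_smp.c; this code is not part of this commit and may differ between kernel versions):

/* Sketch only: generic PSCI secondary-CPU boot, roughly as in psci_smp.c. */
#include <linux/errno.h>
#include <linux/psci.h>		/* psci_ops */
#include <asm/memory.h>		/* virt_to_idmap() */
#include <asm/smp_plat.h>	/* cpu_logical_map() */

extern void secondary_startup(void);

static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/* Ask the firmware to power on the CPU and enter secondary_startup. */
	if (psci_ops.cpu_on)
		return psci_ops.cpu_on(cpu_logical_map(cpu),
				       virt_to_idmap(&secondary_startup));
	return -ENODEV;
}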

--- a/arch/arm/include/asm/hardware/cache-uniphier.h
+++ b/arch/arm/include/asm/hardware/cache-uniphier.h

@@ -1,5 +1,6 @@
 /*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -19,28 +20,11 @@
 
 #ifdef CONFIG_CACHE_UNIPHIER
 int uniphier_cache_init(void);
-int uniphier_cache_l2_is_enabled(void);
-void uniphier_cache_l2_touch_range(unsigned long start, unsigned long end);
-void uniphier_cache_l2_set_locked_ways(u32 way_mask);
 #else
 static inline int uniphier_cache_init(void)
 {
 	return -ENODEV;
 }
-
-static inline int uniphier_cache_l2_is_enabled(void)
-{
-	return 0;
-}
-
-static inline void uniphier_cache_l2_touch_range(unsigned long start,
-						 unsigned long end)
-{
-}
-
-static inline void uniphier_cache_l2_set_locked_ways(u32 way_mask)
-{
-}
 #endif
 
 #endif /* __CACHE_UNIPHIER_H */

--- a/arch/arm/mach-uniphier/Makefile
+++ b/arch/arm/mach-uniphier/Makefile

@@ -1 +1 @@
-obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+obj- += dummy.o

--- a/arch/arm/mach-uniphier/headsmp.S
+++ /dev/null

@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/cp15.h>
-
-ENTRY(uniphier_smp_trampoline)
-ARM_BE8(setend	be)			@ ensure we are in BE8 mode
-	mrc	p15, 0, r0, c0, c0, 5	@ MPIDR (Multiprocessor Affinity Reg)
-	and	r2, r0, #0x3		@ CPU ID
-	ldr	r1, uniphier_smp_trampoline_jump
-	ldr	r3, uniphier_smp_trampoline_poll_addr
-	mrc	p15, 0, r0, c1, c0, 0	@ SCTLR (System Control Register)
-	orr	r0, r0, #CR_I		@ Enable ICache
-	bic	r0, r0, #(CR_C | CR_M)	@ Disable MMU and Dcache
-	mcr	p15, 0, r0, c1, c0, 0
-	b	1f			@ cache the following 5 instructions
-0:	wfe
-1:	ldr	r0, [r3]
-	cmp	r0, r2
-	bxeq	r1			@ branch to secondary_startup
-	b	0b
-	.globl	uniphier_smp_trampoline_jump
-uniphier_smp_trampoline_jump:
-	.word	0			@ set virt_to_phys(secondary_startup)
-	.globl	uniphier_smp_trampoline_poll_addr
-uniphier_smp_trampoline_poll_addr:
-	.word	0			@ set CPU ID to be kicked to this reg
-	.globl	uniphier_smp_trampoline_end
-uniphier_smp_trampoline_end:
-ENDPROC(uniphier_smp_trampoline)

--- a/arch/arm/mach-uniphier/platsmp.c
+++ /dev/null

@@ -1,209 +0,0 @@
-/*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt)		"uniphier: " fmt
-
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/sizes.h>
-#include <asm/cacheflush.h>
-#include <asm/hardware/cache-uniphier.h>
-#include <asm/pgtable.h>
-#include <asm/smp.h>
-#include <asm/smp_scu.h>
-
-/*
- * The secondary CPUs check this register from the boot ROM for the jump
- * destination.  After that, it can be reused as a scratch register.
- */
-#define UNIPHIER_SMPCTRL_ROM_RSV2	0x208
-
-static void __iomem *uniphier_smp_rom_boot_rsv2;
-static unsigned int uniphier_smp_max_cpus;
-
-extern char uniphier_smp_trampoline;
-extern char uniphier_smp_trampoline_jump;
-extern char uniphier_smp_trampoline_poll_addr;
-extern char uniphier_smp_trampoline_end;
-
-/*
- * Copy trampoline code to the tail of the 1st section of the page table used
- * in the boot ROM.  This area is directly accessible by the secondary CPUs
- * for all the UniPhier SoCs.
- */
-static const phys_addr_t uniphier_smp_trampoline_dest_end = SECTION_SIZE;
-static phys_addr_t uniphier_smp_trampoline_dest;
-
-static int __init uniphier_smp_copy_trampoline(phys_addr_t poll_addr)
-{
-	size_t trmp_size;
-	static void __iomem *trmp_base;
-
-	if (!uniphier_cache_l2_is_enabled()) {
-		pr_warn("outer cache is needed for SMP, but not enabled\n");
-		return -ENODEV;
-	}
-
-	uniphier_cache_l2_set_locked_ways(1);
-	outer_flush_all();
-
-	trmp_size = &uniphier_smp_trampoline_end - &uniphier_smp_trampoline;
-	uniphier_smp_trampoline_dest = uniphier_smp_trampoline_dest_end -
-							trmp_size;
-
-	uniphier_cache_l2_touch_range(uniphier_smp_trampoline_dest,
-				      uniphier_smp_trampoline_dest_end);
-
-	trmp_base = ioremap_cache(uniphier_smp_trampoline_dest, trmp_size);
-	if (!trmp_base) {
-		pr_err("failed to map trampoline destination area\n");
-		return -ENOMEM;
-	}
-
-	memcpy(trmp_base, &uniphier_smp_trampoline, trmp_size);
-
-	writel(virt_to_phys(secondary_startup),
-	       trmp_base + (&uniphier_smp_trampoline_jump -
-			    &uniphier_smp_trampoline));
-
-	writel(poll_addr, trmp_base + (&uniphier_smp_trampoline_poll_addr -
-				       &uniphier_smp_trampoline));
-
-	flush_cache_all();	/* flush out trampoline code to outer cache */
-
-	iounmap(trmp_base);
-
-	return 0;
-}
-
-static int __init uniphier_smp_prepare_trampoline(unsigned int max_cpus)
-{
-	struct device_node *np;
-	struct resource res;
-	phys_addr_t rom_rsv2_phys;
-	int ret;
-
-	np = of_find_compatible_node(NULL, NULL, "socionext,uniphier-smpctrl");
-	ret = of_address_to_resource(np, 0, &res);
-	of_node_put(np);
-	if (ret) {
-		pr_err("failed to get resource of SMP control\n");
-		return ret;
-	}
-
-	rom_rsv2_phys = res.start + UNIPHIER_SMPCTRL_ROM_RSV2;
-
-	ret = uniphier_smp_copy_trampoline(rom_rsv2_phys);
-	if (ret)
-		return ret;
-
-	uniphier_smp_rom_boot_rsv2 = ioremap(rom_rsv2_phys, SZ_4);
-	if (!uniphier_smp_rom_boot_rsv2) {
-		pr_err("failed to map ROM_BOOT_RSV2 register\n");
-		return -ENOMEM;
-	}
-
-	writel(uniphier_smp_trampoline_dest, uniphier_smp_rom_boot_rsv2);
-	asm("sev"); /* Bring up all secondary CPUs to the trampoline code */
-
-	uniphier_smp_max_cpus = max_cpus;	/* save for later use */
-
-	return 0;
-}
-
-static void __init uniphier_smp_unprepare_trampoline(void)
-{
-	iounmap(uniphier_smp_rom_boot_rsv2);
-
-	if (uniphier_smp_trampoline_dest)
-		outer_inv_range(uniphier_smp_trampoline_dest,
-				uniphier_smp_trampoline_dest_end);
-
-	uniphier_cache_l2_set_locked_ways(0);
-}
-
-static int __init uniphier_smp_enable_scu(void)
-{
-	unsigned long scu_base_phys = 0;
-	void __iomem *scu_base;
-
-	if (scu_a9_has_base())
-		scu_base_phys = scu_a9_get_base();
-
-	if (!scu_base_phys) {
-		pr_err("failed to get scu base\n");
-		return -ENODEV;
-	}
-
-	scu_base = ioremap(scu_base_phys, SZ_128);
-	if (!scu_base) {
-		pr_err("failed to map scu base\n");
-		return -ENOMEM;
-	}
-
-	scu_enable(scu_base);
-	iounmap(scu_base);
-
-	return 0;
-}
-
-static void __init uniphier_smp_prepare_cpus(unsigned int max_cpus)
-{
-	static cpumask_t only_cpu_0 = { CPU_BITS_CPU0 };
-	int ret;
-
-	ret = uniphier_smp_prepare_trampoline(max_cpus);
-	if (ret)
-		goto err;
-
-	ret = uniphier_smp_enable_scu();
-	if (ret)
-		goto err;
-
-	return;
-
-err:
-	pr_warn("disabling SMP\n");
-	init_cpu_present(&only_cpu_0);
-	uniphier_smp_unprepare_trampoline();
-}
-
-static int __init uniphier_smp_boot_secondary(unsigned int cpu,
-					      struct task_struct *idle)
-{
-	if (WARN_ON_ONCE(!uniphier_smp_rom_boot_rsv2))
-		return -EFAULT;
-
-	writel(cpu, uniphier_smp_rom_boot_rsv2);
-	readl(uniphier_smp_rom_boot_rsv2); /* relax */
-
-	asm("sev"); /* wake up secondary CPUs sleeping in the trampoline */
-
-	if (cpu == uniphier_smp_max_cpus - 1) {
-		/* clean up resources if this is the last CPU */
-		uniphier_smp_unprepare_trampoline();
-	}
-
-	return 0;
-}
-
-static const struct smp_operations uniphier_smp_ops __initconst = {
-	.smp_prepare_cpus	= uniphier_smp_prepare_cpus,
-	.smp_boot_secondary	= uniphier_smp_boot_secondary,
-};
-CPU_METHOD_OF_DECLARE(uniphier_smp, "socionext,uniphier-smp",
-		      &uniphier_smp_ops);

--- a/arch/arm/mm/cache-uniphier.c
+++ b/arch/arm/mm/cache-uniphier.c

@@ -1,5 +1,6 @@
 /*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -43,27 +44,15 @@
 #define UNIPHIER_SSCOPE_CM_SYNC			0x8	/* sync (drain bufs) */
 #define UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH	0x9	/* flush p-fetch buf */
 #define UNIPHIER_SSCOQM		0x248	/* Cache Operation Queue Mode */
-#define UNIPHIER_SSCOQM_TID_MASK	(0x3 << 21)
-#define UNIPHIER_SSCOQM_TID_LRU_DATA	(0x0 << 21)
-#define UNIPHIER_SSCOQM_TID_LRU_INST	(0x1 << 21)
-#define UNIPHIER_SSCOQM_TID_WAY		(0x2 << 21)
 #define UNIPHIER_SSCOQM_S_MASK		(0x3 << 17)
 #define UNIPHIER_SSCOQM_S_RANGE		(0x0 << 17)
 #define UNIPHIER_SSCOQM_S_ALL		(0x1 << 17)
-#define UNIPHIER_SSCOQM_S_WAY		(0x2 << 17)
 #define UNIPHIER_SSCOQM_CE		BIT(15)	/* notify completion */
 #define UNIPHIER_SSCOQM_CM_INV		0x0	/* invalidate */
 #define UNIPHIER_SSCOQM_CM_CLEAN	0x1	/* clean */
 #define UNIPHIER_SSCOQM_CM_FLUSH	0x2	/* flush */
-#define UNIPHIER_SSCOQM_CM_PREFETCH	0x3	/* prefetch to cache */
-#define UNIPHIER_SSCOQM_CM_PREFETCH_BUF	0x4	/* prefetch to pf-buf */
-#define UNIPHIER_SSCOQM_CM_TOUCH	0x5	/* touch */
-#define UNIPHIER_SSCOQM_CM_TOUCH_ZERO	0x6	/* touch to zero */
-#define UNIPHIER_SSCOQM_CM_TOUCH_DIRTY	0x7	/* touch with dirty */
 #define UNIPHIER_SSCOQAD	0x24c	/* Cache Operation Queue Address */
 #define UNIPHIER_SSCOQSZ	0x250	/* Cache Operation Queue Size */
-#define UNIPHIER_SSCOQMASK	0x254	/* Cache Operation Queue Address Mask */
-#define UNIPHIER_SSCOQWN	0x258	/* Cache Operation Queue Way Number */
 #define UNIPHIER_SSCOPPQSEF	0x25c	/* Cache Operation Queue Set Complete*/
 #define UNIPHIER_SSCOPPQSEF_FE	BIT(1)
 #define UNIPHIER_SSCOPPQSEF_OE	BIT(0)
@@ -72,9 +61,6 @@
 #define UNIPHIER_SSCOLPQS_EST	BIT(1)
 #define UNIPHIER_SSCOLPQS_QST	BIT(0)
 
-/* Is the touch/pre-fetch destination specified by ways? */
-#define UNIPHIER_SSCOQM_TID_IS_WAY(op) \
-		((op & UNIPHIER_SSCOQM_TID_MASK) == UNIPHIER_SSCOQM_TID_WAY)
 /* Is the operation region specified by address range? */
 #define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
 		((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
@@ -178,11 +164,6 @@ static void __uniphier_cache_maint_common(struct uniphier_cache_data *data,
 			writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
 			writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
 		}
 
-		/* set target ways if needed */
-		if (unlikely(UNIPHIER_SSCOQM_TID_IS_WAY(operation)))
-			writel_relaxed(data->way_locked_mask,
-				       data->op_base + UNIPHIER_SSCOQWN);
-
 	} while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
 			  (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));
@@ -338,46 +319,8 @@ static void uniphier_cache_sync(void)
 		__uniphier_cache_sync(data);
 }
 
-int __init uniphier_cache_l2_is_enabled(void)
-{
-	struct uniphier_cache_data *data;
-
-	data = list_first_entry_or_null(&uniphier_cache_list,
-					struct uniphier_cache_data, list);
-	if (!data)
-		return 0;
-
-	return !!(readl_relaxed(data->ctrl_base + UNIPHIER_SSCC) &
-		  UNIPHIER_SSCC_ON);
-}
-
-void __init uniphier_cache_l2_touch_range(unsigned long start,
-					  unsigned long end)
-{
-	struct uniphier_cache_data *data;
-
-	data = list_first_entry_or_null(&uniphier_cache_list,
-					struct uniphier_cache_data, list);
-	if (data)
-		__uniphier_cache_maint_range(data, start, end,
-					     UNIPHIER_SSCOQM_TID_WAY |
-					     UNIPHIER_SSCOQM_CM_TOUCH);
-}
-
-void __init uniphier_cache_l2_set_locked_ways(u32 way_mask)
-{
-	struct uniphier_cache_data *data;
-
-	data = list_first_entry_or_null(&uniphier_cache_list,
-					struct uniphier_cache_data, list);
-	if (data)
-		__uniphier_cache_set_locked_ways(data, way_mask);
-}
-
 static const struct of_device_id uniphier_cache_match[] __initconst = {
-	{
-		.compatible = "socionext,uniphier-system-cache",
-	},
+	{ .compatible = "socionext,uniphier-system-cache" },
 	{ /* sentinel */ }
 };