Revert "ARM: OMAP4: remove dead kconfig option OMAP4_ERRATA_I688"

This reverts commit 606da4826b.

We actually need this code for proper behaviour of OMAP4, and it needs
fixing in a different way rather than just removing the code.  Disabling
code which is necessary in the hopes of pursuing multiplatform kernels
is a stupid approach.

Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Richard Woodruff <r-woodruff2@ti.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
commit f746929ffd (parent 4e1f8a6f1d)
Author: Russell King
Date:   2015-06-06 00:13:40 +01:00

7 changed files with 105 additions and 0 deletions


@@ -240,6 +240,27 @@ config OMAP3_SDRC_AC_TIMING
	  wish to say no. Selecting yes without understanding what is
	  going on could result in system crashes;

config OMAP4_ERRATA_I688
	bool "OMAP4 errata: Async Bridge Corruption"
	depends on (ARCH_OMAP4 || SOC_OMAP5) && !ARCH_MULTIPLATFORM
	select ARCH_HAS_BARRIERS
	help
	  If data is stalled inside an asynchronous bridge because of back
	  pressure, it may be accepted multiple times, creating a pointer
	  misalignment that will corrupt the next transfers on that data path
	  until the next reset of the system (there is no recovery procedure
	  once the issue is hit; the path remains consistently broken).
	  Async bridges can be found on the paths between the MPU and EMIF
	  and between the MPU and the L3 interconnect.
	  This situation can only happen when the idle is initiated by a
	  Master Request Disconnection (which is triggered by software when
	  executing WFI on the CPU).
	  The workaround for this errata requires that all initiators
	  connected through an async bridge ensure the data path is properly
	  drained before issuing WFI. This condition is met if one strongly
	  ordered access is performed to the target right before executing
	  the WFI. In the MPU case, the L3 T2ASYNC FIFO and the DDR T2ASYNC
	  FIFO need to be drained. An IO barrier ensures that there is no
	  synchronisation loss for initiators operating on both interconnect
	  ports simultaneously.

endmenu
endif
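
The workaround described in the help text above boils down to one rule: every initiator behind an async bridge performs a strongly ordered access to drain the bridge FIFOs immediately before WFI. Below is a minimal C sketch of that sequence; it reuses omap_bus_sync() as restored by this commit, but the helper name omap4_idle_drain_and_wfi() and the direct use of the generic wfi() macro are illustrative assumptions, not code from this revert.

#include <asm/barrier.h>	/* wfi() on ARMv7 */

/* Restored by this commit: one strongly ordered read/write on the DRAM
 * and SRAM barrier pages, draining the MPU->EMIF and MPU->L3 paths. */
extern void omap_bus_sync(void);

/* Sketch only: drain the async bridges, then enter WFI. */
static inline void omap4_idle_drain_and_wfi(void)
{
	omap_bus_sync();	/* strongly ordered accesses drain the T2ASYNC FIFOs */
	wfi();			/* now safe to trigger the master request disconnection */
}

The real low-power entry code is in assembly; the final hunk of this commit only shows the empty omap_bus_sync stub used when the errata option is disabled.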


@@ -30,4 +30,5 @@ int __weak omap_secure_ram_reserve_memblock(void)
void __init omap_reserve(void)
{
	omap_secure_ram_reserve_memblock();
	omap_barrier_reserve_memblock();
}


@@ -200,6 +200,9 @@ void __init omap4_map_io(void);
void __init omap5_map_io(void);
void __init ti81xx_map_io(void);

/* omap_barriers_init() is OMAP4 only */
void omap_barriers_init(void);

/**
 * omap_test_timeout - busy-loop, testing a condition
 * @cond: condition to test until it evaluates to true


@@ -306,6 +306,7 @@ void __init am33xx_map_io(void)
void __init omap4_map_io(void)
{
	iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
	omap_barriers_init();
}
#endif

@@ -313,6 +314,7 @@ void __init omap4_map_io(void)
void __init omap5_map_io(void)
{
	iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc));
	omap_barriers_init();
}
#endif
/*


@@ -70,6 +70,13 @@ extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
extern u32 rx51_secure_update_aux_cr(u32 set_bits, u32 clear_bits);
extern u32 rx51_secure_rng_call(u32 ptr, u32 count, u32 flag);
#ifdef CONFIG_OMAP4_ERRATA_I688
extern int omap_barrier_reserve_memblock(void);
#else
static inline void omap_barrier_reserve_memblock(void)
{ }
#endif
#ifdef CONFIG_SOC_HAS_REALTIME_COUNTER
void set_cntfreq(void);
#else


@@ -51,6 +51,75 @@ static void __iomem *twd_base;
#define IRQ_LOCALTIMER		29

#ifdef CONFIG_OMAP4_ERRATA_I688
/* Used to implement memory barrier on DRAM path */
#define OMAP4_DRAM_BARRIER_VA	0xfe600000

void __iomem *dram_sync, *sram_sync;

static phys_addr_t paddr;
static u32 size;

void omap_bus_sync(void)
{
	if (dram_sync && sram_sync) {
		writel_relaxed(readl_relaxed(dram_sync), dram_sync);
		writel_relaxed(readl_relaxed(sram_sync), sram_sync);
		isb();
	}
}
EXPORT_SYMBOL(omap_bus_sync);

static int __init omap4_sram_init(void)
{
	struct device_node *np;
	struct gen_pool *sram_pool;

	np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
	if (!np)
		pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
			__func__);
	sram_pool = of_get_named_gen_pool(np, "sram", 0);
	if (!sram_pool)
		pr_warn("%s:Unable to get sram pool needed to handle errata I688\n",
			__func__);
	else
		sram_sync = (void *)gen_pool_alloc(sram_pool, PAGE_SIZE);

	return 0;
}
omap_arch_initcall(omap4_sram_init);

/* Steal one page physical memory for barrier implementation */
int __init omap_barrier_reserve_memblock(void)
{
	size = ALIGN(PAGE_SIZE, SZ_1M);
	paddr = arm_memblock_steal(size, SZ_1M);

	return 0;
}

void __init omap_barriers_init(void)
{
	struct map_desc dram_io_desc[1];

	dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
	dram_io_desc[0].pfn = __phys_to_pfn(paddr);
	dram_io_desc[0].length = size;
	dram_io_desc[0].type = MT_MEMORY_RW_SO;
	iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
	dram_sync = (void __iomem *) dram_io_desc[0].virtual;

	pr_info("OMAP4: Map 0x%08llx to 0x%08lx for dram barrier\n",
		(long long) paddr, dram_io_desc[0].virtual);
}
#else
void __init omap_barriers_init(void)
{}
#endif

void gic_dist_disable(void)
{
	if (gic_dist_base_addr)
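
As an aside, the ARCH_HAS_BARRIERS select in the Kconfig hunk above tells the generic ARM barrier code to pick up a platform-provided mach/barriers.h, which is why omap_bus_sync() is exported here. That header is not among the hunks shown on this page, so the following is only a sketch of how such a header routes the generic barriers through omap_bus_sync(); the exact macro bodies (dsb() plus an outer-cache sync on wmb()) are assumptions, not text from this commit.

/* Sketch of a mach/barriers.h as used with ARCH_HAS_BARRIERS (assumed contents) */
#ifndef __MACH_BARRIERS_H
#define __MACH_BARRIERS_H

#include <asm/outercache.h>

extern void omap_bus_sync(void);

#define rmb()	dsb()
#define wmb()	do { dsb(); outer_sync(); omap_bus_sync(); } while (0)
#define mb()	wmb()

#endif /* __MACH_BARRIERS_H */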


@@ -333,9 +333,11 @@ ENDPROC(omap4_cpu_resume)
#endif	/* defined(CONFIG_SMP) && defined(CONFIG_PM) */

#ifndef CONFIG_OMAP4_ERRATA_I688
ENTRY(omap_bus_sync)
	ret	lr
ENDPROC(omap_bus_sync)
#endif

ENTRY(omap_do_wfi)
	stmfd	sp!, {lr}