[AARCH64] Add support for new control bits CTR_EL0.DIC and CTR_EL0.IDC

The DCache clean and ICache invalidation requirements for instructions
to be data coherent are discoverable through new fields in CTR_EL0.
Let's support these two bits: when they are set, the CPU core will not
execute the unnecessary DCache clean or ICache invalidation
instructions.

2019-09-25  Shaokun Zhang  <zhangshaokun@hisilicon.com>

	* config/aarch64/sync-cache.c (__aarch64_sync_cache_range): Add support for
	CTR_EL0.IDC and CTR_EL0.DIC.

From-SVN: r276122
commit 761e6bb9f7 (parent 21f7f9980c)
Shaokun Zhang <zhangshaokun@hisilicon.com>, 2019-09-25 12:38:59 +00:00, committed by Kyrylo Tkachov
2 changed files with 41 additions and 21 deletions
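
As background (not part of the commit itself), a minimal sketch of how the two
control bits can be probed at run time, assuming the kernel permits EL0 reads
of CTR_EL0 as Linux does; the standalone program below is made up for
illustration and only reuses the register read and shift values from the patch:

#include <stdio.h>

#define CTR_IDC_SHIFT 28
#define CTR_DIC_SHIFT 29

int
main (void)
{
  unsigned long ctr;

  /* Same system-register read as __aarch64_sync_cache_range below.  */
  asm volatile ("mrs\t%0, ctr_el0" : "=r" (ctr));

  /* IDC: DCache clean to the Point of Unification is not required for
     instruction to data coherence.
     DIC: ICache invalidation to the Point of Unification is not required
     for instruction to data coherence.  */
  printf ("CTR_EL0.IDC = %lu, CTR_EL0.DIC = %lu\n",
          (ctr >> CTR_IDC_SHIFT) & 0x1,
          (ctr >> CTR_DIC_SHIFT) & 0x1);
  return 0;
}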

--- a/libgcc/ChangeLog
+++ b/libgcc/ChangeLog

@@ -1,3 +1,8 @@
+2019-09-25  Shaokun Zhang  <zhangshaokun@hisilicon.com>
+
+	* config/aarch64/sync-cache.c (__aarch64_sync_cache_range): Add support for
+	CTR_EL0.IDC and CTR_EL0.DIC.
+
 2019-09-20  Christophe Lyon  <christophe.lyon@st.com>
 
 	Revert:

--- a/libgcc/config/aarch64/sync-cache.c
+++ b/libgcc/config/aarch64/sync-cache.c

@@ -23,6 +23,9 @@ a copy of the GCC Runtime Library Exception along with this program;
    see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
    <http://www.gnu.org/licenses/>.  */
 
+#define CTR_IDC_SHIFT           28
+#define CTR_DIC_SHIFT           29
+
 void __aarch64_sync_cache_range (const void *, const void *);
 
 void
@@ -41,32 +44,44 @@ __aarch64_sync_cache_range (const void *base, const void *end)
   icache_lsize = 4 << (cache_info & 0xF);
   dcache_lsize = 4 << ((cache_info >> 16) & 0xF);
 
-  /* Loop over the address range, clearing one cache line at once.
-     Data cache must be flushed to unification first to make sure the
-     instruction cache fetches the updated data.  'end' is exclusive,
-     as per the GNU definition of __clear_cache.  */
+  /* If CTR_EL0.IDC is enabled, Data cache clean to the Point of Unification is
+     not required for instruction to data coherence.  */
 
-  /* Make the start address of the loop cache aligned.  */
-  address = (const char*) ((__UINTPTR_TYPE__) base
-                           & ~ (__UINTPTR_TYPE__) (dcache_lsize - 1));
+  if (((cache_info >> CTR_IDC_SHIFT) & 0x1) == 0x0) {
+    /* Loop over the address range, clearing one cache line at once.
+       Data cache must be flushed to unification first to make sure the
+       instruction cache fetches the updated data.  'end' is exclusive,
+       as per the GNU definition of __clear_cache.  */
 
-  for (; address < (const char *) end; address += dcache_lsize)
-    asm volatile ("dc\tcvau, %0"
-                  :
-                  : "r" (address)
-                  : "memory");
+    /* Make the start address of the loop cache aligned.  */
+    address = (const char*) ((__UINTPTR_TYPE__) base
+                             & ~ (__UINTPTR_TYPE__) (dcache_lsize - 1));
+
+    for (; address < (const char *) end; address += dcache_lsize)
+      asm volatile ("dc\tcvau, %0"
+                    :
+                    : "r" (address)
+                    : "memory");
+  }
 
   asm volatile ("dsb\tish" : : : "memory");
 
-  /* Make the start address of the loop cache aligned.  */
-  address = (const char*) ((__UINTPTR_TYPE__) base
-                           & ~ (__UINTPTR_TYPE__) (icache_lsize - 1));
+  /* If CTR_EL0.DIC is enabled, Instruction cache cleaning to the Point of
+     Unification is not required for instruction to data coherence.  */
 
-  for (; address < (const char *) end; address += icache_lsize)
-    asm volatile ("ic\tivau, %0"
-                  :
-                  : "r" (address)
-                  : "memory");
+  if (((cache_info >> CTR_DIC_SHIFT) & 0x1) == 0x0) {
+    /* Make the start address of the loop cache aligned.  */
+    address = (const char*) ((__UINTPTR_TYPE__) base
+                             & ~ (__UINTPTR_TYPE__) (icache_lsize - 1));
 
-  asm volatile ("dsb\tish; isb" : : : "memory");
+    for (; address < (const char *) end; address += icache_lsize)
+      asm volatile ("ic\tivau, %0"
+                    :
+                    : "r" (address)
+                    : "memory");
+
+    asm volatile ("dsb\tish" : : : "memory");
+  }
+
+  asm volatile("isb" : : : "memory");
 }
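
For context (again not part of the commit), a hedged usage sketch: user code
normally reaches __aarch64_sync_cache_range through GCC's
__builtin___clear_cache after writing instructions to memory, and on cores
that report CTR_EL0.IDC/DIC the loops above are skipped so only the barriers
remain. The function name, instruction encodings, and writable-plus-executable
mapping below are illustrative only (W^X-hardened systems may reject such a
mapping):

#include <string.h>
#include <sys/mman.h>

typedef int (*jit_fn) (void);

/* Two AArch64 instructions: "mov w0, #42; ret".  */
static const unsigned int insns[] = { 0x52800540, 0xd65f03c0 };

int
run_jitted_code (void)
{
  void *buf = mmap (0, sizeof insns, PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (buf == MAP_FAILED)
    return -1;

  memcpy (buf, insns, sizeof insns);

  /* Makes the new instructions visible to the instruction stream; on
     aarch64 this ends up in libgcc's __aarch64_sync_cache_range, with
     'end' exclusive as described above.  */
  __builtin___clear_cache ((char *) buf, (char *) buf + sizeof insns);

  return ((jit_fn) buf) ();   /* Returns 42.  */
}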