arm: spelling fixes

Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Author: Michael Tokarev, 2023-07-14 14:14:49 +03:00
parent cced0d6539
commit 673d821541
24 changed files with 30 additions and 30 deletions


@@ -1,6 +1,6 @@
 /*
 * SPDX-License-Identifier: GPL-2.0-or-later
-* Host specific cpu indentification for AArch64.
+* Host specific cpu identification for AArch64.
 */
 #ifndef HOST_CPUINFO_H


@@ -1565,7 +1565,7 @@ static void ast1030_evb_i2c_init(AspeedMachineState *bmc)
 {
 AspeedSoCState *soc = &bmc->soc;
-/* U10 24C08 connects to SDA/SCL Groupt 1 by default */
+/* U10 24C08 connects to SDA/SCL Group 1 by default */
 uint8_t *eeprom_buf = g_malloc0(32 * 1024);
 smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 0), 0x50, eeprom_buf);


@@ -1205,7 +1205,7 @@ static void mps2_tz_idau_check(IDAUInterface *ii, uint32_t address,
 {
 /*
 * The MPS2 TZ FPGA images have IDAUs in them which are connected to
-* the Master Security Controllers. Thes have the same logic as
+* the Master Security Controllers. These have the same logic as
 * is used by the IoTKit for the IDAU connected to the CPU, except
 * that MSCs don't care about the NSC attribute.
 */


@@ -239,7 +239,7 @@ static inline bool gic_lr_entry_is_free(uint32_t entry)
 }
 /* Return true if this LR should trigger an EOI maintenance interrupt, i.e. the
-* corrsponding bit in EISR is set.
+* corresponding bit in EISR is set.
 */
 static inline bool gic_lr_entry_is_eoi(uint32_t entry)
 {
@@ -1333,7 +1333,7 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
 /* ??? This currently clears the pending bit for all CPUs, even
 for per-CPU interrupts. It's unclear whether this is the
-corect behavior. */
+correct behavior. */
 if (value & (1 << i)) {
 GIC_DIST_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
 }


@@ -494,7 +494,7 @@ static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
 /* Only the ProcessorSleep bit is writable. When the guest sets
 * it, it requests that we transition the channel between the
 * redistributor and the cpu interface to quiescent, and that
-* we set the ChildrenAsleep bit once the inteface has reached the
+* we set the ChildrenAsleep bit once the interface has reached the
 * quiescent state.
 * Setting the ProcessorSleep to 0 reverses the quiescing, and
 * ChildrenAsleep is cleared once the transition is complete.


@@ -894,7 +894,7 @@ int armv7m_nvic_complete_irq(NVICState *s, int irq, bool secure)
 vec->active = 0;
 if (vec->level) {
 /* Re-pend the exception if it's still held high; only
-* happens for extenal IRQs
+* happens for external IRQs
 */
 assert(irq >= NVIC_FIRST_IRQ);
 vec->pending = 1;


@@ -368,7 +368,7 @@ static const MemoryRegionOps allwinner_r40_detect_ops = {
 /*
 * mctl_r40_detect_rank_count in u-boot will write the high 1G of DDR
-* to detect wether the board support dual_rank or not. Create a virtual memory
+* to detect whether the board support dual_rank or not. Create a virtual memory
 * if the board's ram_size less or equal than 1G, and set read time out flag of
 * REG_DRAMCTL_PGSR when the user touch this high dram.
 */


@@ -1,5 +1,5 @@
 /*
-* Exynos4210 Pseudo Random Nubmer Generator Emulation
+* Exynos4210 Pseudo Random Number Generator Emulation
 *
 * Copyright (c) 2017 Krzysztof Kozlowski <krzk@kernel.org>
 *


@@ -165,7 +165,7 @@ enum FslIMX7MemoryMap {
 * Some versions of the reference manual claim that UART2 is @
 * 0x30870000, but experiments with HW + DT files in upstream
 * Linux kernel show that not to be true and that block is
-* acutally located @ 0x30890000
+* actually located @ 0x30890000
 */
 FSL_IMX7_UART2_ADDR = 0x30890000,
 FSL_IMX7_UART3_ADDR = 0x30880000,


@@ -74,7 +74,7 @@ struct NVICState {
 */
 bool vectpending_is_s_banked;
 int exception_prio; /* group prio of the highest prio active exception */
-int vectpending_prio; /* group prio of the exeception in vectpending */
+int vectpending_prio; /* group prio of the exception in vectpending */
 MemoryRegion sysregmem;


@@ -677,7 +677,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
 }
 /*
-* The PSTATE bits only mask the interrupt if we have not overriden the
+* The PSTATE bits only mask the interrupt if we have not overridden the
 * ability above.
 */
 return unmasked || pstate_unmasked;


@@ -2592,7 +2592,7 @@ static inline bool arm_el_is_aa64(CPUARMState *env, int el)
 return aa64;
 }
-/* Function for determing whether guest cp register reads and writes should
+/* Function for determining whether guest cp register reads and writes should
 * access the secure or non-secure bank of a cp register. When EL3 is
 * operating in AArch32 state, the NS-bit determines whether the secure
 * instance of a cp register should be used. When EL3 is AArch64 (or if


@@ -95,7 +95,7 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
 if (kvm_enabled()) {
 /*
-* For KVM we have to automatically enable all supported unitialized
+* For KVM we have to automatically enable all supported uninitialized
 * lengths, even when the smaller lengths are not all powers-of-two.
 */
 vq_map |= vq_supported & ~vq_init & vq_mask;


@@ -1674,7 +1674,7 @@ static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
 * pmevtyper_rawwrite is called between a pair of pmu_op_start and
 * pmu_op_finish calls when loading saved state for a migration. Because
 * we're potentially updating the type of event here, the value written to
-* c14_pmevcntr_delta by the preceeding pmu_op_start call may be for a
+* c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
 * different counter type. Therefore, we need to set this value to the
 * current count for the counter type we're writing so that pmu_op_finish
 * has the correct count for its calculation.
@@ -7009,7 +7009,7 @@ static const ARMCPRegInfo rme_reginfo[] = {
 /*
 * QEMU does not have a way to invalidate by physical address, thus
 * invalidating a range of physical addresses is accomplished by
-* flushing all tlb entries in the outer sharable domain,
+* flushing all tlb entries in the outer shareable domain,
 * just like PAALLOS.
 */
 { .name = "TLBI_RPALOS", .state = ARM_CP_STATE_AA64,


@@ -148,7 +148,7 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
 * R: 0 because unpriv and A flag not set
 * SRVALID: 0 because NS
 * MRVALID: 0 because unpriv and A flag not set
-* SREGION: 0 becaus SRVALID is 0
+* SREGION: 0 because SRVALID is 0
 * MREGION: 0 because MRVALID is 0
 */
 return 0;


@@ -182,7 +182,7 @@ void gen_a64_update_pc(DisasContext *s, target_long diff)
 * + for EL2 and EL3 there is only one TBI bit, and if it is set
 * then the address is zero-extended, clearing bits [63:56]
 * + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
-* and TBI1 controls addressses with bit 55 == 1.
+* and TBI1 controls addresses with bit 55 == 1.
 * If the appropriate TBI bit is set for the address then
 * the address is sign-extended from bit 55 into bits [63:56]
 *
@@ -2313,7 +2313,7 @@ static void handle_sys(DisasContext *s, bool isread,
 if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
 /*
-* A write to any coprocessor regiser that ends a TB
+* A write to any coprocessor register that ends a TB
 * must rebuild the hflags for the next TB.
 */
 gen_rebuild_hflags(s);


@@ -2182,7 +2182,7 @@ static bool trans_VMOV_to_2gp(DisasContext *s, arg_VMOV_to_2gp *a)
 * execution if it is not in an IT block. For us this means
 * only that if PSR.ECI says we should not be executing the beat
 * corresponding to the lane of the vector register being accessed
-* then we should skip perfoming the move, and that we need to do
+* then we should skip performing the move, and that we need to do
 * the usual check for bad ECI state and advance of ECI state.
 * (If PSR.ECI is non-zero then we cannot be in an IT block.)
 */
@@ -2225,7 +2225,7 @@ static bool trans_VMOV_from_2gp(DisasContext *s, arg_VMOV_to_2gp *a)
 * execution if it is not in an IT block. For us this means
 * only that if PSR.ECI says we should not be executing the beat
 * corresponding to the lane of the vector register being accessed
-* then we should skip perfoming the move, and that we need to do
+* then we should skip performing the move, and that we need to do
 * the usual check for bad ECI state and advance of ECI state.
 * (If PSR.ECI is non-zero then we cannot be in an IT block.)
 */


@@ -1841,7 +1841,7 @@ TRANS_FEAT(PNEXT, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pnext)
 /* Perform an inline saturating addition of a 32-bit value within
 * a 64-bit register. The second operand is known to be positive,
-* which halves the comparisions we must perform to bound the result.
+* which halves the comparisons we must perform to bound the result.
 */
 static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
 {


@@ -144,7 +144,7 @@ static void gen_preserve_fp_state(DisasContext *s, bool skip_context_update)
 * Generate code for M-profile FP context handling: update the
 * ownership of the FP context, and create a new context if
 * necessary. This corresponds to the parts of the pseudocode
-* ExecuteFPCheck() after the inital PreserveFPState() call.
+* ExecuteFPCheck() after the initial PreserveFPState() call.
 */
 static void gen_update_fp_context(DisasContext *s)
 {


@@ -2626,7 +2626,7 @@ void HELPER(gvec_bfmmla)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
 * Process the entire segment at once, writing back the
 * results only after we've consumed all of the inputs.
 *
-* Key to indicies by column:
+* Key to indices by column:
 * i j i k j k
 */
 sum00 = a[s + H4(0 + 0)];


@@ -1,6 +1,6 @@
 from __future__ import print_function
 #
-# Test the SVE registers are visable and changeable via gdbstub
+# Test the SVE registers are visible and changeable via gdbstub
 #
 # This is launched via tests/guest-debug/run-test.py
 #


@@ -28,7 +28,7 @@ asm(
 " fmopa za1.s, p0/m, p0/m, z0.s, z0.s\n"
 /*
 * Read the first 4x4 sub-matrix of elements from tile 1:
-* Note that za1h should be interchangable here.
+* Note that za1h should be interchangeable here.
 */
 " mov w12, #0\n"
 " mova z0.s, p0/m, za1v.s[w12, #0]\n"


@@ -9,7 +9,7 @@
 /*
 * Semihosting interface on ARM AArch64
-* See "Semihosting for AArch32 and AArch64 Relase 2.0" by ARM
+* See "Semihosting for AArch32 and AArch64 Release 2.0" by ARM
 * w0 - semihosting call number
 * x1 - semihosting parameter
 */
@@ -147,7 +147,7 @@ __start:
 * T0SZ[5:0] = 2^(64 - 25)
 *
 * The size of T0SZ controls what the initial lookup level. It
-* would be nice to start at level 2 but unfortunatly for a
+* would be nice to start at level 2 but unfortunately for a
 * flat-mapping on the virt machine we need to handle IA's
 * with at least 1gb range to see RAM. So we start with a
 * level 1 lookup.
@@ -189,7 +189,7 @@ __start:
 msr cpacr_el1, x0
 /* Setup some stack space and enter the test code.
-* Assume everthing except the return value is garbage when we
+* Assume everything except the return value is garbage when we
 * return, we won't need it.
 */
 adrp x0, stack_end


@@ -86,7 +86,7 @@ int main(int argc, char *argv[argc])
 }
 ptr_to_heap++;
 }
-ml_printf("r/w to heap upto %p\n", ptr_to_heap);
+ml_printf("r/w to heap up to %p\n", ptr_to_heap);
 ml_printf("Passed HeapInfo checks\n");
 return 0;