i386: spelling fixes
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
parent 944399ffb2
commit bad5cfcd60
@@ -1,6 +1,6 @@
 /*
  * SPDX-License-Identifier: GPL-2.0-or-later
- * Host specific cpu indentification for x86.
+ * Host specific cpu identification for x86.
  */

 #ifndef HOST_CPUINFO_H
@@ -779,7 +779,7 @@ static Aml *initialize_route(Aml *route, const char *link_name,
  *
  * Returns an array of 128 routes, one for each device,
  * based on device location.
- * The main goal is to equaly distribute the interrupts
+ * The main goal is to equally distribute the interrupts
  * over the 4 existing ACPI links (works only for i440fx).
  * The hash function is (slot + pin) & 3 -> "LNK[D|A|B|C]".
  *
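The hash in that comment is simple enough to demonstrate standalone. A minimal sketch (not QEMU's code; the helper names are invented, and the D, A, B, C table order follows the comment's "LNK[D|A|B|C]"):

#include <stdio.h>

static const char *lnk_names[] = { "LNKD", "LNKA", "LNKB", "LNKC" };

static const char *route_link(int slot, int pin)
{
    return lnk_names[(slot + pin) & 3];   /* the (slot + pin) & 3 hash */
}

int main(void)
{
    /* neighbouring slots land on different links, spreading the load */
    for (int slot = 0; slot < 4; slot++) {
        for (int pin = 0; pin < 4; pin++) {
            printf("slot %d INT%c -> %s\n", slot, 'A' + pin,
                   route_link(slot, pin));
        }
    }
    return 0;
}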
@@ -2079,7 +2079,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
 }

 /*
- * Insert DMAR scope for PCI bridges and endpoint devcie
+ * Insert DMAR scope for PCI bridges and endpoint devices
  */
 static void
 insert_scope(PCIBus *bus, PCIDevice *dev, void *opaque)
@@ -259,7 +259,7 @@ static void amdvi_log_command_error(AMDVIState *s, hwaddr addr)
     pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
                                PCI_STATUS_SIG_TARGET_ABORT);
 }
-/* log an illegal comand event
+/* log an illegal command event
  * @addr : address of illegal command
  */
 static void amdvi_log_illegalcom_error(AMDVIState *s, uint16_t info,
@@ -52,7 +52,7 @@

 /*
  * PCI bus number (or SID) is not reliable since the device is usaully
- * initalized before guest can configure the PCI bridge
+ * initialized before guest can configure the PCI bridge
  * (SECONDARY_BUS_NUMBER).
  */
 struct vtd_as_key {
@@ -1694,7 +1694,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
      * """
      *
      * We enable per as memory region (iommu_ir_fault) for catching
-     * the tranlsation for interrupt range through PASID + PT.
+     * the translation for interrupt range through PASID + PT.
      */
     if (pt && as->pasid != PCI_NO_PASID) {
         memory_region_set_enabled(&as->iommu_ir_fault, true);
@@ -1156,7 +1156,7 @@ static unsigned int copy_to_ring(XenXenstoreState *s, uint8_t *ptr,

     /*
      * This matches the barrier in copy_to_ring() (or the guest's
-     * equivalent) betweem writing the data to the ring and updating
+     * equivalent) between writing the data to the ring and updating
      * rsp_prod. It protects against the pathological case (which
      * again I think never happened except on Alpha) where our
      * subsequent writes to the ring could *cross* the read of
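The pairing this comment describes is the classic ring-buffer publish protocol: the producer must order its data writes before the index update, and the consumer must order its index read before reading the data. A self-contained C11 sketch of the same discipline (not the Xen ring code; names and sizes are illustrative, single producer and single consumer assumed):

#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 1024
static uint8_t ring[RING_SIZE];
static _Atomic uint32_t rsp_prod, rsp_cons;

void producer_write(uint8_t byte)
{
    uint32_t prod = atomic_load_explicit(&rsp_prod, memory_order_relaxed);
    ring[prod % RING_SIZE] = byte;              /* write the data first... */
    atomic_store_explicit(&rsp_prod, prod + 1,  /* ...then publish it      */
                          memory_order_release);
}

int consumer_read(uint8_t *byte)
{
    uint32_t cons = atomic_load_explicit(&rsp_cons, memory_order_relaxed);
    /* acquire pairs with the producer's release: the data read below
     * cannot cross this load of rsp_prod */
    if (cons == atomic_load_explicit(&rsp_prod, memory_order_acquire)) {
        return 0;                               /* ring empty */
    }
    *byte = ring[cons % RING_SIZE];
    atomic_store_explicit(&rsp_cons, cons + 1, memory_order_release);
    return 1;
}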
@@ -1436,7 +1436,7 @@ static void save_node(gpointer key, gpointer value, gpointer opaque)
     /*
      * If we already wrote this node, refer to the previous copy.
      * There's no rename/move in XenStore, so all we need to find
-     * it is the tx_id of the transation in which it exists. Which
+     * it is the tx_id of the transaction in which it exists. Which
      * may be the root tx.
      */
     if (n->serialized_tx != XBT_NULL) {
@@ -436,7 +436,7 @@ static uint64_t ioport80_read(void *opaque, hwaddr addr, unsigned size)
     return 0xffffffffffffffffULL;
 }

-/* MSDOS compatibility mode FPU exception support */
+/* MS-DOS compatibility mode FPU exception support */
 static void ioportF0_write(void *opaque, hwaddr addr, uint64_t data,
                            unsigned size)
 {
@@ -1755,7 +1755,7 @@ static void pc_machine_set_max_fw_size(Object *obj, Visitor *v,
     if (value > 16 * MiB) {
         error_setg(errp,
                    "User specified max allowed firmware size %" PRIu64 " is "
-                   "greater than 16MiB. If combined firwmare size exceeds "
+                   "greater than 16MiB. If combined firmware size exceeds "
                    "16MiB the system may not boot, or experience intermittent"
                    "stability issues.",
                    value);
@@ -31,7 +31,7 @@
  *
  * This code should be compatible with AMD's "Extended Method" described at:
  * AMD CPUID Specification (Publication #25481)
- * Section 3: Multiple Core Calcuation
+ * Section 3: Multiple Core Calculation
  * as long as:
  * nr_threads is set to 1;
  * OFFSET_IDX is assumed to be 0;
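For reference, the "extended method" style of topology encoding packs the thread, core, and package indexes into the APIC ID using bit fields sized ceil(log2(count)). A simplified standalone sketch of that scheme (function names invented; dies/modules ignored; assumes a GCC/Clang __builtin_clz):

#include <stdint.h>
#include <stdio.h>

/* bits needed to encode ids 0..count-1, i.e. ceil(log2(count)) */
static unsigned bitwidth_for_count(unsigned count)
{
    return count <= 1 ? 0 : 32 - __builtin_clz(count - 1);
}

static uint32_t apicid_from_topo(unsigned pkg_id, unsigned core_id,
                                 unsigned smt_id, unsigned nr_cores,
                                 unsigned nr_threads)
{
    unsigned smt_width = bitwidth_for_count(nr_threads);
    unsigned core_width = bitwidth_for_count(nr_cores);
    return (pkg_id << (smt_width + core_width)) |
           (core_id << smt_width) | smt_id;
}

int main(void)
{
    /* 6 cores, nr_threads == 1: the core field is 3 bits wide */
    printf("0x%x\n", apicid_from_topo(1, 5, 0, 6, 1)); /* prints 0xd */
    return 0;
}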
@@ -5340,7 +5340,7 @@ static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
     return name;
 }

-/* Compatibily hack to maintain legacy +-feat semantic,
+/* Compatibility hack to maintain legacy +-feat semantic,
  * where +-feat overwrites any feature set by
  * feat=on|feat even if the later is parsed after +-feat
  * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
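In other words, "+feat" and "-feat" are collected into separate masks and applied after all "feat=on|off" properties, so the minus mask always wins regardless of parse order. A hypothetical sketch of that ordering (struct and field names invented; bit 21 is CPUID.1:ECX x2apic):

#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t features;        /* result of feat=on|off property parsing */
    uint64_t plus_features;   /* bits collected from "+feat" */
    uint64_t minus_features;  /* bits collected from "-feat" */
} FeatParse;

static void apply_legacy_feats(FeatParse *p)
{
    p->features |= p->plus_features;    /* "+feat" beats feat=off */
    p->features &= ~p->minus_features;  /* "-feat" beats feat=on  */
}

int main(void)
{
    /* "-x2apic,x2apic=on": x2apic=on sets the bit, "-x2apic" clears it */
    FeatParse p = { .features = 1u << 21, .minus_features = 1u << 21 };
    apply_legacy_feats(&p);
    printf("x2apic: %s\n", (p.features & (1u << 21)) ? "on" : "off");
    return 0;
}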
@@ -6303,7 +6303,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
             * The initial value of xcr0 and ebx == 0, On host without kvm
             * commit 412a3c41(e.g., CentOS 6), the ebx's value always == 0
             * even through guest update xcr0, this will crash some legacy guest
-            * (e.g., CentOS 6), So set ebx == ecx to workaroud it.
+            * (e.g., CentOS 6), So set ebx == ecx to workaround it.
             */
            *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0, false);
        } else if (count == 1) {
@@ -728,7 +728,7 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
 #define CPUID_EXT2_3DNOWEXT (1U << 30)
 #define CPUID_EXT2_3DNOW (1U << 31)

-/* CPUID[8000_0001].EDX bits that are aliase of CPUID[1].EDX bits on AMD CPUs */
+/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
 #define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
                                 CPUID_EXT2_DE | CPUID_EXT2_PSE | \
                                 CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
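An alias mask like this is typically used to mirror the shared CPUID[1].EDX bits into CPUID[8000_0001].EDX, since AMD CPUs report those features in both leaves. A hedged sketch of that use, spelling out only the first three bits (bit positions are the architectural CPUID.1:EDX ones):

#include <stdint.h>
#include <stdio.h>

#define CPUID_EXT2_FPU (1U << 0)
#define CPUID_EXT2_VME (1U << 1)
#define CPUID_EXT2_DE  (1U << 2)
/* ...the real mask covers every bit AMD defines as shared with CPUID[1].EDX */
#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | CPUID_EXT2_DE)

int main(void)
{
    uint32_t cpuid1_edx = CPUID_EXT2_FPU | CPUID_EXT2_DE; /* example features */
    uint32_t ext2_edx = 0;
    /* propagate the aliased bits, as an AMD CPU reports them in both leaves */
    ext2_edx |= cpuid1_edx & CPUID_EXT2_AMD_ALIASES;
    printf("0x%08x\n", ext2_edx);
    return 0;
}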
@@ -2071,7 +2071,7 @@ hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                          MemTxAttrs *attrs);
 int cpu_get_pic_interrupt(CPUX86State *s);

-/* MSDOS compatibility mode FPU exception support */
+/* MS-DOS compatibility mode FPU exception support */
 void x86_register_ferr_irq(qemu_irq irq);
 void fpu_check_raise_ferr_irq(CPUX86State *s);
 void cpu_set_ignne(void);
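The "MS-DOS compatibility mode" these declarations support is the legacy FERR#/IGNNE# handshake: an FPU error asserts FERR# (routed to IRQ13 on PC-compatibles), and a write to I/O port 0xF0 clears the latched interrupt and asserts IGNNE#. A toy model of that handshake (not QEMU code; everything here is illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool ferr_asserted;  /* FERR# line -> IRQ13 on PC-compatibles */
static bool ignne;          /* IGNNE# line back into the FPU */

static void fpu_error(void)       { ferr_asserted = true; }

static void ioport_f0_write(void) /* what the IRQ13 handler does */
{
    ferr_asserted = false;  /* clear the latched FERR interrupt */
    ignne = true;           /* FPU ignores numeric errors for now */
}

int main(void)
{
    fpu_error();
    printf("FERR=%d IGNNE=%d\n", ferr_asserted, ignne);
    ioport_f0_write();
    printf("FERR=%d IGNNE=%d\n", ferr_asserted, ignne);
    return 0;
}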
@@ -4729,7 +4729,7 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
    /*
     * Put MSR_IA32_FEATURE_CONTROL first, this ensures the VM gets out of VMX
     * root operation upon vCPU reset. kvm_put_msr_feature_control() should also
-    * preceed kvm_put_nested_state() when 'real' nested state is set.
+    * precede kvm_put_nested_state() when 'real' nested state is set.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_msr_feature_control(x86_cpu);
@@ -5653,7 +5653,7 @@ int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
         }

         /*
-         * Handled untranslated compatibilty format interrupt with
+         * Handled untranslated compatibility format interrupt with
          * extended destination ID in the low bits 11-5. */
         dst.address = kvm_swizzle_msi_ext_dest_id(dst.address);

@@ -1033,7 +1033,7 @@ static int do_set_periodic_timer(CPUState *target, uint64_t period_ns)
 #define MILLISECS(_ms) ((int64_t)((_ms) * 1000000ULL))
 #define MICROSECS(_us) ((int64_t)((_us) * 1000ULL))
 #define STIME_MAX ((time_t)((int64_t)~0ull >> 1))
-/* Chosen so (NOW() + delta) wont overflow without an uptime of 200 years */
+/* Chosen so (NOW() + delta) won't overflow without an uptime of 200 years */
 #define STIME_DELTA_MAX ((int64_t)((uint64_t)~0ull >> 2))

 static int vcpuop_set_periodic_timer(CPUState *cs, CPUState *target,
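A quick sanity check of that claim, assuming signed 64-bit nanosecond timestamps as the macros suggest: NOW() + STIME_DELTA_MAX can only exceed STIME_MAX once NOW() itself passes STIME_MAX - STIME_DELTA_MAX = 2^62 ns, about 146 years of uptime, the order of magnitude the comment cites:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t stime_max = (int64_t)(~0ull >> 1);            /* 2^63 - 1 ns */
    int64_t delta_max = (int64_t)((uint64_t)~0ull >> 2);  /* 2^62 - 1 ns */
    double ns_per_year = 365.25 * 24 * 3600 * 1e9;
    /* NOW() + delta only exceeds STIME_MAX once uptime passes this: */
    printf("headroom: %.0f years\n", (stime_max - delta_max) / ns_per_year);
    return 0;                                    /* prints ~146 years */
}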
@@ -282,12 +282,12 @@ static int cpu_pre_save(void *opaque)
      * hypervisor, its exception payload (CR2/DR6 on #PF/#DB)
      * should not be set yet in the respective vCPU register.
      * Thus, in case an exception is pending, it is
-     * important to save the exception payload seperately.
+     * important to save the exception payload separately.
      *
      * Therefore, if an exception is not in a pending state
      * or vCPU is not in guest-mode, it is not important to
      * distinguish between a pending and injected exception
-     * and we don't need to store seperately the exception payload.
+     * and we don't need to store separately the exception payload.
      *
      * In order to preserve better backwards-compatible migration,
      * convert a pending exception to an injected exception in
@@ -1069,7 +1069,7 @@ static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
 }

 /* perform a conditional store into register 'reg' according to jump opcode
-   value 'b'. In the fast case, T0 is guaranted not to be used. */
+   value 'b'. In the fast case, T0 is guaranteed not to be used. */
 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
 {
     int inv, jcc_op, cond;
@@ -1202,7 +1202,7 @@ static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
 }

 /* generate a conditional jump to label 'l1' according to jump opcode
-   value 'b'. In the fast case, T0 is guaranted not to be used. */
+   value 'b'. In the fast case, T0 is guaranteed not to be used. */
 static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
 {
     CCPrepare cc = gen_prepare_cc(s, b, s->T0);
@@ -1219,7 +1219,7 @@ static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
 }

 /* Generate a conditional jump to label 'l1' according to jump opcode
-   value 'b'. In the fast case, T0 is guaranted not to be used.
+   value 'b'. In the fast case, T0 is guaranteed not to be used.
    A translation block must end soon. */
 static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
 {
@@ -5355,7 +5355,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
     if (s->prefix & PREFIX_LOCK) {
         switch (op) {
         case 0: /* bt */
-            /* Needs no atomic ops; we surpressed the normal
+            /* Needs no atomic ops; we suppressed the normal
                memory load for LOCK above so do it now. */
            gen_op_ld_v(s, ot, s->T0, s->A0);
            break;
@@ -71,7 +71,7 @@ _start:
        add $8,%esp

        /*
-        * Don't worry about stack frame, assume everthing
+        * Don't worry about stack frame, assume everything
         * is garbage when we return, we won't need it.
         */
        call main
@@ -19,7 +19,7 @@
 #
 # 4. The instruction encoding. For example, "C1 /4 ib".
 #
-# 5. The validity of the instruction in 32-bit (aka compatiblity, legacy) mode.
+# 5. The validity of the instruction in 32-bit (aka compatibility, legacy) mode.
 #
 # 6. The validity of the instruction in 64-bit mode.
 #
Can't render this file because it is too large.