trivial patches for 2023-11-16
Merge tag 'pull-trivial-patches' of https://gitlab.com/mjt0k/qemu into staging

trivial patches for 2023-11-16

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEe3O61ovnosKJMUsicBtPaxppPlkFAmVVxz4PHG1qdEB0bHMu
# bXNrLnJ1AAoJEHAbT2saaT5ZI+cH+wexpGPHmmWHaA0moo+1MZPC3pbEvOXq184b
# oeGRUidq89380DzsxkIxrDn98KisKnIX3oGZ56Q394Ntg7J2xyFN/KsvQhzpElSb
# 01Ws90NVoHIXoXZKNIOFZXkqOLCB+kwqZ1PFiYwALEJkEPBfpV40dTWuyCnxh1D8
# lKHtk5bLKzDbTmDYYfnZ7zkP6CLMhRH7A7evdb/4+W+phbqTHeKbSgq8QhNvVX8n
# 38yzPTQPlMyXHw7Psio62N7wz86wEiGkYELud1nPPlA902paM5FHMdjYBohm/ZCM
# 4E12gzMg4SgwBIsWoyE/1tUAjyJXeChocxOVLFqDXXaiYgomAh0=
# =x0bq
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 16 Nov 2023 02:39:42 EST
# gpg: using RSA key 7B73BAD68BE7A2C289314B22701B4F6B1A693E59
# gpg: issuer "mjt@tls.msk.ru"
# gpg: Good signature from "Michael Tokarev <mjt@tls.msk.ru>" [full]
# gpg: aka "Michael Tokarev <mjt@corpit.ru>" [full]
# gpg: aka "Michael Tokarev <mjt@debian.org>" [full]
# Primary key fingerprint: 6EE1 95D1 886E 8FFB 810D 4324 457C E0A0 8044 65C5
# Subkey fingerprint: 7B73 BAD6 8BE7 A2C2 8931 4B22 701B 4F6B 1A69 3E59

* tag 'pull-trivial-patches' of https://gitlab.com/mjt0k/qemu: (27 commits)
  util/range.c: spelling fix: inbetween
  util/filemonitor-inotify.c: spelling fix: kenel
  tests/qtest/ufs-test.c: spelling fix: tranfer
  tests/qtest/migration-test.c: spelling fix: bandwith
  target/riscv/cpu.h: spelling fix: separatly
  include/hw/virtio/vhost.h: spelling fix: sate
  include/hw/hyperv/dynmem-proto.h: spelling fix: nunber, atleast
  include/block/ufs.h: spelling fix: setted
  hw/net/cadence_gem.c: spelling fixes: Octects
  hw/mem/memory-device.c: spelling fix: ontaining
  contrib/vhost-user-gpu/virgl.c: spelling fix: mesage
  migration/rdma.c: spelling fix: asume
  target/hppa: spelling fixes: Indicies, Truely
  target/arm/tcg: spelling fixes: alse, addreses
  docs/system/arm/emulation.rst: spelling fix: Enhacements
  docs/devel/migration.rst: spelling fixes: doen't, diferent, responsability, recomend
  docs/about/deprecated.rst: spelling fix: becase
  gdbstub: spelling fix: respectivelly
  hw/cxl: spelling fixes: limitaions, potentialy, intialized
  linux-user: spelling fixes: othe, necesary
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 95e008b9dd

@@ -174,6 +174,7 @@ F: include/hw/core/tcg-cpu-ops.h
F: host/include/*/host/cpuinfo.h
F: util/cpuinfo-*.c
F: include/tcg/
F: tests/decode/

FPU emulation
M: Aurelien Jarno <aurelien@aurel32.net>

@@ -235,7 +235,7 @@ static inline abi_long do_obreak(abi_ulong brk_val)
        return target_brk;
    }

    /* Release heap if necesary */
    /* Release heap if necessary */
    if (new_brk < old_brk) {
        target_munmap(new_brk, old_brk - new_brk);

@@ -115,7 +115,7 @@ abi_long freebsd_exec_common(abi_ulong path_or_fd, abi_ulong guest_argp,
    }

    qarg0 = argp = g_new0(char *, argc + 9);
    /* save the first agrument for the emulator */
    /* save the first argument for the emulator */
    *argp++ = (char *)getprogname();
    qargp = argp;
    *argp++ = (char *)getprogname();

@@ -146,7 +146,7 @@ static inline abi_long do_freebsd_fstatat(abi_long arg1, abi_long arg2,
    return ret;
}

/* undocummented nstat(char *path, struct nstat *ub) syscall */
/* undocumented nstat(char *path, struct nstat *ub) syscall */
static abi_long do_freebsd11_nstat(abi_long arg1, abi_long arg2)
{
    abi_long ret;

@@ -162,7 +162,7 @@ static abi_long do_freebsd11_nstat(abi_long arg1, abi_long arg2)
    return ret;
}

/* undocummented nfstat(int fd, struct nstat *sb) syscall */
/* undocumented nfstat(int fd, struct nstat *sb) syscall */
static abi_long do_freebsd11_nfstat(abi_long arg1, abi_long arg2)
{
    abi_long ret;

@@ -175,7 +175,7 @@ static abi_long do_freebsd11_nfstat(abi_long arg1, abi_long arg2)
    return ret;
}

/* undocummented nlstat(char *path, struct nstat *ub) syscall */
/* undocumented nlstat(char *path, struct nstat *ub) syscall */
static abi_long do_freebsd11_nlstat(abi_long arg1, abi_long arg2)
{
    abi_long ret;

@@ -401,7 +401,7 @@ virgl_cmd_set_scanout(VuGpu *g,

    if (g->use_modifiers) {
        /*
         * The mesage uses all the fields set in dmabuf_scanout plus
         * The message uses all the fields set in dmabuf_scanout plus
         * modifiers which is appended after VhostUserGpuDMABUFScanout.
         */
        msg.request = VHOST_USER_GPU_DMABUF_SCANOUT2;

@@ -514,5 +514,5 @@ old compression method (since 8.2)

Compression method fails too much. Too many races. We are going to
remove it if nobody fixes it. For starters, migration-test
compression tests are disabled becase they fail randomly. If you need
compression tests are disabled because they fail randomly. If you need
compression, use multifd compression methods.

@@ -1061,7 +1061,7 @@ QEMU version, in this case pc-5.1.

4 - qemu-5.1 -M pc-5.2 -> migrates to -> qemu-5.1 -M pc-5.2

This combination is not possible as the qemu-5.1 doen't understand
This combination is not possible as the qemu-5.1 doesn't understand
pc-5.2 machine type. So nothing to worry here.

Now it comes the interesting ones, when both QEMU processes are

@@ -1214,8 +1214,8 @@ machine types to have the right value::
        ...
    };

A device with diferent features on both sides
---------------------------------------------
A device with different features on both sides
----------------------------------------------

Let's assume that we are using the same QEMU binary on both sides,
just to make the things easier. But we have a device that has

@@ -1294,12 +1294,12 @@ Host B:

    $ qemu-system-x86_64 -cpu host,taa-no=off

And you would be able to migrate between them. It is responsability
And you would be able to migrate between them. It is responsibility
of the management application or of the user to make sure that the
configuration is correct. QEMU doesn't know how to look at this kind
of features in general.

Notice that we don't recomend to use -cpu host for migration. It is
Notice that we don't recommend to use -cpu host for migration. It is
used in this example because it makes the example simpler.

Other devices have worse control about individual features. If they

@@ -70,7 +70,7 @@ the following architecture extensions:
- FEAT_PAN2 (AT S1E1R and AT S1E1W instruction variants affected by PSTATE.PAN)
- FEAT_PAN3 (Support for SCTLR_ELx.EPAN)
- FEAT_PAuth (Pointer authentication)
- FEAT_PAuth2 (Enhacements to pointer authentication)
- FEAT_PAuth2 (Enhancements to pointer authentication)
- FEAT_PMULL (PMULL, PMULL2 instructions)
- FEAT_PMUv3p1 (PMU Extensions v3.1)
- FEAT_PMUv3p4 (PMU Extensions v3.4)

@@ -692,7 +692,7 @@ static int gdb_handle_vcont(const char *p)
    /*
     * target_count and last_target keep track of how many CPUs we are going to
     * step or resume, and a pointer to the state structure of one of them,
     * respectivelly
     * respectively
     */
    int target_count = 0;
    CPUState *last_target = NULL;

@@ -1126,7 +1126,7 @@ static void virtio_snd_realize(DeviceState *dev, Error **errp)
        status = virtio_snd_set_pcm_params(vsnd, i, &default_params);
        if (status != cpu_to_le32(VIRTIO_SND_S_OK)) {
            error_setg(errp,
                       "Can't initalize stream params, device responded with %s.",
                       "Can't initialize stream params, device responded with %s.",
                       print_code(status));
            return;
        }

@@ -81,7 +81,7 @@ static uint64_t cxl_cache_mem_read_reg(void *opaque, hwaddr offset,
        return 0;
    default:
        /*
         * In line with specifiction limitaions on access sizes, this
         * In line with specification limitations on access sizes, this
         * routine is not called with other sizes.
         */
        g_assert_not_reached();

@@ -152,7 +152,7 @@ static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value,
        return;
    default:
        /*
         * In line with specifiction limitaions on access sizes, this
         * In line with specification limitations on access sizes, this
         * routine is not called with other sizes.
         */
        g_assert_not_reached();

@@ -431,7 +431,7 @@ static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
    out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
    *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
        .num_physical_ports = num_phys_ports + 1, /* 1 USP */
        .num_vcss = 1, /* Not yet support multiple VCS - potentialy tricky */
        .num_vcss = 1, /* Not yet support multiple VCS - potentially tricky */
        .active_vcs_bitmask[0] = 0x1,
        .total_vppbs = num_phys_ports + 1,
        .bound_vppbs = num_phys_ports + 1,

@@ -29,7 +29,7 @@ static bool memory_device_is_empty(const MemoryDeviceState *md)
    /* dropping const here is fine as we don't touch the memory region */
    mr = mdc->get_memory_region((MemoryDeviceState *)md, &local_err);
    if (local_err) {
        /* Not empty, we'll report errors later when ontaining the MR again. */
        /* Not empty, we'll report errors later when containing the MR again. */
        error_free(local_err);
        return false;
    }

@@ -225,8 +225,8 @@ REG32(WOLAN, 0xb8) /* Wake on LAN reg */
REG32(IPGSTRETCH, 0xbc) /* IPG Stretch reg */
REG32(SVLAN, 0xc0) /* Stacked VLAN reg */
REG32(MODID, 0xfc) /* Module ID reg */
REG32(OCTTXLO, 0x100) /* Octects transmitted Low reg */
REG32(OCTTXHI, 0x104) /* Octects transmitted High reg */
REG32(OCTTXLO, 0x100) /* Octets transmitted Low reg */
REG32(OCTTXHI, 0x104) /* Octets transmitted High reg */
REG32(TXCNT, 0x108) /* Error-free Frames transmitted */
REG32(TXBCNT, 0x10c) /* Error-free Broadcast Frames */
REG32(TXMCNT, 0x110) /* Error-free Multicast Frame */

@@ -245,8 +245,8 @@ REG32(EXCESSCOLLCNT, 0x140) /* Excessive Collision Frames */
REG32(LATECOLLCNT, 0x144) /* Late Collision Frames */
REG32(DEFERTXCNT, 0x148) /* Deferred Transmission Frames */
REG32(CSENSECNT, 0x14c) /* Carrier Sense Error Counter */
REG32(OCTRXLO, 0x150) /* Octects Received register Low */
REG32(OCTRXHI, 0x154) /* Octects Received register High */
REG32(OCTRXLO, 0x150) /* Octets Received register Low */
REG32(OCTRXHI, 0x154) /* Octets Received register High */
REG32(RXCNT, 0x158) /* Error-free Frames Received */
REG32(RXBROADCNT, 0x15c) /* Error-free Broadcast Frames RX */
REG32(RXMULTICNT, 0x160) /* Error-free Multicast Frames RX */

@@ -14,7 +14,6 @@
#include "qemu/module.h"
#include "qemu/timer.h"
#include "sysemu/watchdog.h"
#include "hw/misc/aspeed_scu.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "hw/watchdog/wdt_aspeed.h"

@@ -657,7 +657,7 @@ typedef struct QEMU_PACKED UtpTaskReqDesc {
#define UFS_UPIU_MAX_WB_LUN_ID 8

/*
 * WriteBooster buffer lifetime has a limit setted by vendor.
 * WriteBooster buffer lifetime has a limit set by vendor.
 * If it is over the limit, WriteBooster feature will be disabled.
 */
#define UFS_WB_EXCEED_LIFETIME 0x0B

@@ -403,7 +403,7 @@ struct CXLType3Dev {
    CXLComponentState cxl_cstate;
    CXLDeviceState cxl_dstate;
    CXLCCI cci; /* Primary PCI mailbox CCI */
    /* Always intialized as no way to know if a VDM might show up */
    /* Always initialized as no way to know if a VDM might show up */
    CXLCCI vdm_fm_owned_ld_mctp_cci;
    CXLCCI ld0_cci;

@@ -232,7 +232,7 @@ struct dm_capabilities_resp_msg {
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 *                 in the system in pages.
 * zero_free: The nunber of zero and free pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 *          calculated as File Cache Page Fault Count - Page Read Count.

@@ -275,7 +275,7 @@ struct dm_balloon {
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 *             if TRUE there will atleast one more message from the guest.
 *             if TRUE there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *

@@ -296,7 +296,7 @@ struct dm_balloon_response {
 *               to the guest to give guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 *             if TRUE there will atleast one more message from the guest.
 *             if TRUE there will be at least one more message from the guest.
 *
 * reservedz: Reserved; must be set to zero.
 *

@@ -456,7 +456,7 @@ int vhost_save_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp);
 * Must only be called while the device and all its vrings are stopped
 * (`VHOST_TRANSFER_STATE_PHASE_STOPPED`).
 *
 * @dev: The vhost device to which to send the sate
 * @dev: The vhost device to which to send the state
 * @f: Migration stream from which to load the state
 * @errp: Potential error message
 *

@@ -227,7 +227,7 @@ endf __kernel_sigtramp_rt
#ifndef _ARCH_PPC64
        /*
         * The non-rt sigreturn has the same layout at a different offset.
         * Move the CFA and leave all othe other descriptions the same.
         * Move the CFA and leave all the other descriptions the same.
         */
        .cfi_def_cfa 1, SIGNAL_FRAMESIZE + offsetof_sigframe_mcontext
        nop

@@ -831,7 +831,7 @@ abi_long do_brk(abi_ulong brk_val)
        return target_brk;
    }

    /* Release heap if necesary */
    /* Release heap if necessary */
    if (new_brk < old_brk) {
        target_munmap(new_brk, old_brk - new_brk);

@@ -2204,7 +2204,7 @@ retry:
    stat64_add(&mig_stats.normal_pages, sge.length / qemu_target_page_size());
    /*
     * We are adding to transferred the amount of data written, but no
     * overhead at all. I will asume that RDMA is magicaly and don't
     * overhead at all. I will assume that RDMA is magicaly and don't
     * need to transfer (at least) the addresses where it wants to
     * write the pages. Here it looks like it should be something
     * like:

@@ -1658,7 +1658,7 @@
#
# Migration stream channel parameters.
#
# @channel-type: Channel type for transfering packet information.
# @channel-type: Channel type for transferring packet information.
#
# @addr: Migration endpoint configuration on destination interface.
#

@@ -3,8 +3,8 @@

{ 'pragma': { 'doc-required': true } }

# Whitelists to permit QAPI rule violations; think twice before you
# add to them!
# Entries in these lists are allowed to violate the QAPI rules (for
# historical reasons); think twice before you add to them!
{ 'pragma': {
    # Command names containing '_'
    'command-name-exceptions': [

@@ -1414,7 +1414,7 @@ void HELPER(setge)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
/*
 * Perform part of a memory copy from the guest memory at fromaddr
 * and extending for copysize bytes, to the guest memory at
 * toaddr. Both addreses are dirty.
 * toaddr. Both addresses are dirty.
 *
 * Returns the number of bytes actually set, which might be less than
 * copysize; the caller should loop until the whole copy has been done.

@@ -327,7 +327,7 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
            DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
        }
        /*
         * For unpriv tag-setting accesses we alse need ATA0. Again, in
         * For unpriv tag-setting accesses we also need ATA0. Again, in
         * contexts where unpriv and normal insns are the same we
         * duplicate the ATA bit to save effort for translate-a64.c.
         */

@@ -56,7 +56,7 @@
    1 << MMU_PL2_IDX | 1 << MMU_PL2_P_IDX | \
    1 << MMU_USER_IDX | 1 << MMU_USER_P_IDX)

/* Indicies to flush for access_id changes. */
/* Indices to flush for access_id changes. */
#define HPPA_MMU_FLUSH_P_MASK \
    (1 << MMU_KERNEL_P_IDX | 1 << MMU_PL1_P_IDX | \
     1 << MMU_PL2_P_IDX | 1 << MMU_USER_P_IDX)

@@ -129,7 +129,7 @@ static int tlb_post_load(void *opaque, int version_id)

    /*
     * Re-create the interval tree from the valid entries.
     * Truely invalid entries should have start == end == 0.
     * Truly invalid entries should have start == end == 0.
     * Otherwise it should be the in-flight tlb_partial entry.
     */
    for (uint32_t i = 0; i < ARRAY_SIZE(env->tlb); ++i) {

@@ -214,13 +214,13 @@ struct CPUArchState {

    /*
     * When mideleg[i]=0 and mvien[i]=1, sie[i] is no more
     * alias of mie[i] and needs to be maintained separatly.
     * alias of mie[i] and needs to be maintained separately.
     */
    uint64_t sie;

    /*
     * When hideleg[i]=0 and hvien[i]=1, vsie[i] is no more
     * alias of sie[i] (mie[i]) and needs to be maintained separatly.
     * alias of sie[i] (mie[i]) and needs to be maintained separately.
     */
    uint64_t vsie;

@@ -1866,10 +1866,6 @@ blackfin: 4096
blackfin.c: 7552
blackfin.h: 1089
blackfin_sram.h: 1207
blacklist.c: 8658
blacklist.h: 108
blackstamp.c: 9838
BlackStamp_defconfig: 27434
blinken.h: 617
blizzard.c: 41338
blizzard.h: 249

@@ -3138,7 +3138,7 @@ static void test_migrate_dirty_limit(void)
    uint64_t throttle_us_per_full;
    /*
     * We want the test to be stable and as fast as possible.
     * E.g., with 1Gb/s bandwith migration may pass without dirty limit,
     * E.g., with 1Gb/s bandwidth migration may pass without dirty limit,
     * so we need to decrease a bandwidth.
     */
    const int64_t dirtylimit_period = 1000, dirtylimit_value = 50;

@@ -319,7 +319,7 @@ static void ufs_init(QUfs *ufs, QGuestAllocator *alloc)
    ufs_wreg(ufs, A_IE, ie);
    ufs_wreg(ufs, A_UTRIACR, 0);

    /* Enable tranfer request and task management request */
    /* Enable transfer request and task management request */
    cap = ufs_rreg(ufs, A_CAP);
    nutrs = FIELD_EX32(cap, CAP, NUTRS) + 1;
    nutmrs = FIELD_EX32(cap, CAP, NUTMRS) + 1;

@@ -89,7 +89,7 @@ static void qemu_file_monitor_watch(void *arg)
        struct inotify_event *ev = (struct inotify_event *)(buf + used);

        /*
         * We trust the kenel to provide valid buffer with complete event
         * We trust the kernel to provide valid buffer with complete event
         * records.
         */
        assert(len - used >= sizeof(struct inotify_event));

@@ -98,7 +98,7 @@ void range_inverse_array(GList *in, GList **rev,
        out = append_new_range(out, low, MIN(range_lob(r) - 1, high));
    }

    /* insert a range inbetween each original range until we reach high */
    /* insert a range in between each original range until we reach high */
    for (; l->next; l = l->next) {
        r = (Range *)l->data;
        rn = (Range *)l->next->data;