Merge upstream master
This commit is contained in:
commit
2308e3519b
27
.cirrus.yml
27
.cirrus.yml
|
@ -9,39 +9,41 @@ freebsd_12_task:
|
|||
install_script:
|
||||
- ASSUME_ALWAYS_YES=yes pkg bootstrap -f ;
|
||||
- pkg install -y bash curl cyrus-sasl git glib gmake gnutls gsed
|
||||
nettle perl5 pixman pkgconf png usbredir
|
||||
nettle perl5 pixman pkgconf png usbredir ninja
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-werror || { cat config.log; exit 1; }
|
||||
- ../configure --enable-werror || { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- gmake -j$(sysctl -n hw.ncpu)
|
||||
- gmake -j$(sysctl -n hw.ncpu) check V=1
|
||||
|
||||
macos_task:
|
||||
timeout_in: 90m
|
||||
osx_instance:
|
||||
image: catalina-base
|
||||
install_script:
|
||||
- brew install pkg-config python gnu-sed glib pixman make sdl2 bash
|
||||
- brew install pkg-config python gnu-sed glib pixman make sdl2 bash ninja
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --python=/usr/local/bin/python3 --enable-werror
|
||||
--extra-cflags='-Wno-error=deprecated-declarations'
|
||||
|| { cat config.log; exit 1; }
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- gmake -j$(sysctl -n hw.ncpu)
|
||||
- gmake check V=1
|
||||
|
||||
macos_xcode_task:
|
||||
timeout_in: 90m
|
||||
osx_instance:
|
||||
# this is an alias for the latest Xcode
|
||||
image: catalina-xcode
|
||||
install_script:
|
||||
- brew install pkg-config gnu-sed glib pixman make sdl2 bash
|
||||
- brew install pkg-config gnu-sed glib pixman make sdl2 bash ninja
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --extra-cflags='-Wno-error=deprecated-declarations'
|
||||
--enable-werror --cc=clang || { cat config.log; exit 1; }
|
||||
- ../configure --extra-cflags='-Wno-error=deprecated-declarations' --enable-modules
|
||||
--enable-werror --cc=clang || { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- gmake -j$(sysctl -n hw.ncpu)
|
||||
- gmake check V=1
|
||||
|
||||
|
@ -76,7 +78,6 @@ windows_msys2_task:
|
|||
((Get-Content -path C:\tools\msys64\etc\\post-install\\07-pacman-key.post -Raw) -replace '--refresh-keys', '--version') | Set-Content -Path C:\tools\msys64\etc\\post-install\\07-pacman-key.post
|
||||
C:\tools\msys64\usr\bin\bash.exe -lc "sed -i 's/^CheckSpace/#CheckSpace/g' /etc/pacman.conf"
|
||||
C:\tools\msys64\usr\bin\bash.exe -lc "export"
|
||||
C:\tools\msys64\usr\bin\bash.exe -lc "grep -rl 'repo.msys2.org/' /etc/pacman.d/mirrorlist.* | xargs sed -i 's/repo.msys2.org\//mirrors.tuna.tsinghua.edu.cn\/msys2\//g'"
|
||||
C:\tools\msys64\usr\bin\pacman.exe --noconfirm -Sy
|
||||
echo Y | C:\tools\msys64\usr\bin\pacman.exe --noconfirm -Suu --overwrite=*
|
||||
taskkill /F /FI "MODULES eq msys-2.0.dll"
|
||||
|
@ -86,7 +87,6 @@ windows_msys2_task:
|
|||
C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -S --needed \
|
||||
diffutils git grep make pkg-config sed \
|
||||
mingw-w64-x86_64-python \
|
||||
mingw-w64-x86_64-python-setuptools \
|
||||
mingw-w64-x86_64-toolchain \
|
||||
mingw-w64-x86_64-SDL2 \
|
||||
mingw-w64-x86_64-SDL2_image \
|
||||
|
@ -110,7 +110,13 @@ windows_msys2_task:
|
|||
mingw-w64-x86_64-cyrus-sasl \
|
||||
mingw-w64-x86_64-curl \
|
||||
mingw-w64-x86_64-gnutls \
|
||||
mingw-w64-x86_64-libnfs \
|
||||
"
|
||||
bitsadmin /transfer msys_download /dynamic /download /priority FOREGROUND `
|
||||
https://repo.msys2.org/mingw/x86_64/mingw-w64-x86_64-python-sphinx-2.3.1-1-any.pkg.tar.xz `
|
||||
C:\tools\mingw-w64-x86_64-python-sphinx-2.3.1-1-any.pkg.tar.xz
|
||||
C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -U /c/tools/mingw-w64-x86_64-python-sphinx-2.3.1-1-any.pkg.tar.xz"
|
||||
del C:\tools\mingw-w64-x86_64-python-sphinx-2.3.1-1-any.pkg.tar.xz
|
||||
C:\tools\msys64\usr\bin\bash.exe -lc "rm -rf /var/cache/pacman/pkg/*"
|
||||
cd C:\tools\msys64
|
||||
echo "Start archive"
|
||||
|
@ -123,8 +129,7 @@ windows_msys2_task:
|
|||
|
||||
script:
|
||||
- C:\tools\msys64\usr\bin\bash.exe -lc "mkdir build"
|
||||
- C:\tools\msys64\usr\bin\bash.exe -lc "cd build && ../configure
|
||||
--python=python3 --ninja=ninja"
|
||||
- C:\tools\msys64\usr\bin\bash.exe -lc "cd build && ../configure --python=python3"
|
||||
- C:\tools\msys64\usr\bin\bash.exe -lc "cd build && make -j8"
|
||||
test_script:
|
||||
- C:\tools\msys64\usr\bin\bash.exe -lc "cd build && make V=1 check"
|
||||
|
|
|
@ -0,0 +1,2 @@
|
|||
*.c.inc diff=c
|
||||
*.h.inc diff=c
|
|
@ -0,0 +1,94 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# check-dco.py: validate all commits are signed off
|
||||
#
|
||||
# Copyright (C) 2020 Red Hat, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import sys
|
||||
import subprocess
|
||||
|
||||
namespace = "qemu-project"
|
||||
if len(sys.argv) >= 2:
|
||||
namespace = sys.argv[1]
|
||||
|
||||
cwd = os.getcwd()
|
||||
reponame = os.path.basename(cwd)
|
||||
repourl = "https://gitlab.com/%s/%s.git" % (namespace, reponame)
|
||||
|
||||
subprocess.check_call(["git", "remote", "add", "check-dco", repourl])
|
||||
subprocess.check_call(["git", "fetch", "check-dco", "master"],
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.DEVNULL)
|
||||
|
||||
ancestor = subprocess.check_output(["git", "merge-base",
|
||||
"check-dco/master", "HEAD"],
|
||||
universal_newlines=True)
|
||||
|
||||
ancestor = ancestor.strip()
|
||||
|
||||
subprocess.check_call(["git", "remote", "rm", "check-dco"])
|
||||
|
||||
errors = False
|
||||
|
||||
print("\nChecking for 'Signed-off-by: NAME <EMAIL>' " +
|
||||
"on all commits since %s...\n" % ancestor)
|
||||
|
||||
log = subprocess.check_output(["git", "log", "--format=%H %s",
|
||||
ancestor + "..."],
|
||||
universal_newlines=True)
|
||||
|
||||
if log == "":
|
||||
commits = []
|
||||
else:
|
||||
commits = [[c[0:40], c[41:]] for c in log.strip().split("\n")]
|
||||
|
||||
for sha, subject in commits:
|
||||
|
||||
msg = subprocess.check_output(["git", "show", "-s", sha],
|
||||
universal_newlines=True)
|
||||
lines = msg.strip().split("\n")
|
||||
|
||||
print("🔍 %s %s" % (sha, subject))
|
||||
sob = False
|
||||
for line in lines:
|
||||
if "Signed-off-by:" in line:
|
||||
sob = True
|
||||
if "localhost" in line:
|
||||
print(" ❌ FAIL: bad email in %s" % line)
|
||||
errors = True
|
||||
|
||||
if not sob:
|
||||
print(" ❌ FAIL missing Signed-off-by tag")
|
||||
errors = True
|
||||
|
||||
if errors:
|
||||
print("""
|
||||
|
||||
❌ ERROR: One or more commits are missing a valid Signed-off-By tag.
|
||||
|
||||
|
||||
This project requires all contributors to assert that their contributions
|
||||
are provided in compliance with the terms of the Developer's Certificate
|
||||
of Origin 1.1 (DCO):
|
||||
|
||||
https://developercertificate.org/
|
||||
|
||||
To indicate acceptance of the DCO every commit must have a tag
|
||||
|
||||
Signed-off-by: REAL NAME <EMAIL>
|
||||
|
||||
This can be achieved by passing the "-s" flag to the "git commit" command.
|
||||
|
||||
To bulk update all commits on current branch "git rebase" can be used:
|
||||
|
||||
git rebase -i master -x 'git commit --amend --no-edit -s'
|
||||
|
||||
""")
|
||||
|
||||
sys.exit(1)
|
||||
|
||||
sys.exit(0)
|
|
@ -0,0 +1,56 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# check-patch.py: run checkpatch.pl across all commits in a branch
|
||||
#
|
||||
# Copyright (C) 2020 Red Hat, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import sys
|
||||
import subprocess
|
||||
|
||||
namespace = "qemu-project"
|
||||
if len(sys.argv) >= 2:
|
||||
namespace = sys.argv[1]
|
||||
|
||||
cwd = os.getcwd()
|
||||
reponame = os.path.basename(cwd)
|
||||
repourl = "https://gitlab.com/%s/%s.git" % (namespace, reponame)
|
||||
|
||||
# GitLab CI environment does not give us any direct info about the
|
||||
# base for the user's branch. We thus need to figure out a common
|
||||
# ancestor between the user's branch and current git master.
|
||||
subprocess.check_call(["git", "remote", "add", "check-patch", repourl])
|
||||
subprocess.check_call(["git", "fetch", "check-patch", "master"],
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.DEVNULL)
|
||||
|
||||
ancestor = subprocess.check_output(["git", "merge-base",
|
||||
"check-patch/master", "HEAD"],
|
||||
universal_newlines=True)
|
||||
|
||||
ancestor = ancestor.strip()
|
||||
|
||||
log = subprocess.check_output(["git", "log", "--format=%H %s",
|
||||
ancestor + "..."],
|
||||
universal_newlines=True)
|
||||
|
||||
subprocess.check_call(["git", "remote", "rm", "check-patch"])
|
||||
|
||||
if log == "":
|
||||
print("\nNo commits since %s, skipping checks\n" % ancestor)
|
||||
sys.exit(0)
|
||||
|
||||
errors = False
|
||||
|
||||
print("\nChecking all commits since %s...\n" % ancestor, flush=True)
|
||||
|
||||
ret = subprocess.run(["scripts/checkpatch.pl", "--terse", ancestor + "..."])
|
||||
|
||||
if ret.returncode != 0:
|
||||
print(" ❌ FAIL one or more commits failed scripts/checkpatch.pl")
|
||||
sys.exit(1)
|
||||
|
||||
sys.exit(0)
|
|
@ -8,7 +8,7 @@
|
|||
- export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/$NAME:latest"
|
||||
- apk add python3
|
||||
- docker info
|
||||
- docker login registry.gitlab.com -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
|
||||
- docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
|
||||
script:
|
||||
- echo "TAG:$TAG"
|
||||
- echo "COMMON_TAG:$COMMON_TAG"
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
|
||||
.cross_system_build_job_template: &cross_system_build_job_definition
|
||||
.cross_system_build_job:
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
|
||||
timeout: 80m
|
||||
|
@ -13,7 +12,24 @@
|
|||
xtensa-softmmu"
|
||||
- make -j$(expr $(nproc) + 1) all check-build
|
||||
|
||||
.cross_user_build_job_template: &cross_user_build_job_definition
|
||||
# Job to cross-build specific accelerators.
|
||||
#
|
||||
# Set the $ACCEL variable to select the specific accelerator (default to
|
||||
# KVM), and set extra options (such disabling other accelerators) via the
|
||||
# $ACCEL_CONFIGURE_OPTS variable.
|
||||
.cross_accel_build_job:
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
|
||||
timeout: 30m
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- PKG_CONFIG_PATH=$PKG_CONFIG_PATH
|
||||
../configure --enable-werror $QEMU_CONFIGURE_OPTS --disable-tools
|
||||
--enable-${ACCEL:-kvm} $ACCEL_CONFIGURE_OPTS
|
||||
- make -j$(expr $(nproc) + 1) all check-build
|
||||
|
||||
.cross_user_build_job:
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
|
||||
script:
|
||||
|
@ -24,91 +40,111 @@
|
|||
- make -j$(expr $(nproc) + 1) all check-build
|
||||
|
||||
cross-armel-system:
|
||||
<<: *cross_system_build_job_definition
|
||||
extends: .cross_system_build_job
|
||||
variables:
|
||||
IMAGE: debian-armel-cross
|
||||
|
||||
cross-armel-user:
|
||||
<<: *cross_user_build_job_definition
|
||||
extends: .cross_user_build_job
|
||||
variables:
|
||||
IMAGE: debian-armel-cross
|
||||
|
||||
cross-armhf-system:
|
||||
<<: *cross_system_build_job_definition
|
||||
extends: .cross_system_build_job
|
||||
variables:
|
||||
IMAGE: debian-armhf-cross
|
||||
|
||||
cross-armhf-user:
|
||||
<<: *cross_user_build_job_definition
|
||||
extends: .cross_user_build_job
|
||||
variables:
|
||||
IMAGE: debian-armhf-cross
|
||||
|
||||
cross-arm64-system:
|
||||
<<: *cross_system_build_job_definition
|
||||
extends: .cross_system_build_job
|
||||
variables:
|
||||
IMAGE: debian-arm64-cross
|
||||
|
||||
cross-arm64-user:
|
||||
<<: *cross_user_build_job_definition
|
||||
extends: .cross_user_build_job
|
||||
variables:
|
||||
IMAGE: debian-arm64-cross
|
||||
|
||||
cross-mips-system:
|
||||
<<: *cross_system_build_job_definition
|
||||
extends: .cross_system_build_job
|
||||
variables:
|
||||
IMAGE: debian-mips-cross
|
||||
|
||||
cross-mips-user:
|
||||
<<: *cross_user_build_job_definition
|
||||
extends: .cross_user_build_job
|
||||
variables:
|
||||
IMAGE: debian-mips-cross
|
||||
|
||||
cross-mipsel-system:
|
||||
<<: *cross_system_build_job_definition
|
||||
extends: .cross_system_build_job
|
||||
variables:
|
||||
IMAGE: debian-mipsel-cross
|
||||
|
||||
cross-mipsel-user:
|
||||
<<: *cross_user_build_job_definition
|
||||
extends: .cross_user_build_job
|
||||
variables:
|
||||
IMAGE: debian-mipsel-cross
|
||||
|
||||
cross-mips64el-system:
|
||||
<<: *cross_system_build_job_definition
|
||||
extends: .cross_system_build_job
|
||||
variables:
|
||||
IMAGE: debian-mips64el-cross
|
||||
|
||||
cross-mips64el-user:
|
||||
<<: *cross_user_build_job_definition
|
||||
extends: .cross_user_build_job
|
||||
variables:
|
||||
IMAGE: debian-mips64el-cross
|
||||
|
||||
cross-ppc64el-system:
|
||||
<<: *cross_system_build_job_definition
|
||||
extends: .cross_system_build_job
|
||||
variables:
|
||||
IMAGE: debian-ppc64el-cross
|
||||
|
||||
cross-ppc64el-user:
|
||||
<<: *cross_user_build_job_definition
|
||||
extends: .cross_user_build_job
|
||||
variables:
|
||||
IMAGE: debian-ppc64el-cross
|
||||
|
||||
cross-s390x-system:
|
||||
<<: *cross_system_build_job_definition
|
||||
extends: .cross_system_build_job
|
||||
variables:
|
||||
IMAGE: debian-s390x-cross
|
||||
|
||||
cross-s390x-user:
|
||||
<<: *cross_user_build_job_definition
|
||||
extends: .cross_user_build_job
|
||||
variables:
|
||||
IMAGE: debian-s390x-cross
|
||||
|
||||
cross-s390x-kvm-only:
|
||||
extends: .cross_accel_build_job
|
||||
variables:
|
||||
IMAGE: debian-s390x-cross
|
||||
ACCEL_CONFIGURE_OPTS: --disable-tcg
|
||||
|
||||
cross-win32-system:
|
||||
<<: *cross_system_build_job_definition
|
||||
extends: .cross_system_build_job
|
||||
variables:
|
||||
IMAGE: fedora-win32-cross
|
||||
|
||||
cross-win64-system:
|
||||
<<: *cross_system_build_job_definition
|
||||
extends: .cross_system_build_job
|
||||
variables:
|
||||
IMAGE: fedora-win64-cross
|
||||
|
||||
cross-amd64-xen-only:
|
||||
extends: .cross_accel_build_job
|
||||
variables:
|
||||
IMAGE: debian-amd64-cross
|
||||
ACCEL: xen
|
||||
ACCEL_CONFIGURE_OPTS: --disable-tcg --disable-kvm
|
||||
|
||||
cross-arm64-xen-only:
|
||||
extends: .cross_accel_build_job
|
||||
variables:
|
||||
IMAGE: debian-arm64-cross
|
||||
ACCEL: xen
|
||||
ACCEL_CONFIGURE_OPTS: --disable-tcg --disable-kvm
|
||||
|
|
225
.gitlab-ci.yml
225
.gitlab-ci.yml
|
@ -7,12 +7,6 @@ stages:
|
|||
- build
|
||||
- test
|
||||
|
||||
# We assume GitLab has it's own caching set up for RPM/APT repositories so we
|
||||
# just take care of avocado assets here.
|
||||
cache:
|
||||
paths:
|
||||
- $HOME/avocado/data/cache
|
||||
|
||||
include:
|
||||
- local: '/.gitlab-ci.d/edk2.yml'
|
||||
- local: '/.gitlab-ci.d/opensbi.yml'
|
||||
|
@ -24,6 +18,7 @@ include:
|
|||
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
|
||||
before_script:
|
||||
- JOBS=$(expr $(nproc) + 1)
|
||||
- sed -i s,git.qemu.org/git,gitlab.com/qemu-project, .gitmodules
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
|
@ -32,7 +27,7 @@ include:
|
|||
../configure --enable-werror $CONFIGURE_ARGS --target-list="$TARGETS" ;
|
||||
else
|
||||
../configure --enable-werror $CONFIGURE_ARGS ;
|
||||
fi
|
||||
fi || { cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
- make -j"$JOBS"
|
||||
- if test -n "$MAKE_CHECK_ARGS";
|
||||
then
|
||||
|
@ -53,6 +48,11 @@ include:
|
|||
paths:
|
||||
- ${CI_PROJECT_DIR}/avocado-cache
|
||||
policy: pull-push
|
||||
artifacts:
|
||||
paths:
|
||||
- build/tests/results/latest/results.xml
|
||||
reports:
|
||||
junit: build/tests/results/latest/results.xml
|
||||
before_script:
|
||||
- mkdir -p ~/.config/avocado
|
||||
- echo "[datadir.paths]" > ~/.config/avocado/avocado.conf
|
||||
|
@ -61,9 +61,10 @@ include:
|
|||
- if [ -d ${CI_PROJECT_DIR}/avocado-cache ]; then
|
||||
du -chs ${CI_PROJECT_DIR}/avocado-cache ;
|
||||
fi
|
||||
- export AVOCADO_ALLOW_UNTRUSTED_CODE=1
|
||||
after_script:
|
||||
- cd build
|
||||
- python3 -c 'import json; r = json.load(open("tests/results/latest/results.json")); [print(t["logfile"]) for t in r["tests"] if t["status"] not in ("PASS", "SKIP")]' | xargs cat
|
||||
- python3 -c 'import json; r = json.load(open("tests/results/latest/results.json")); [print(t["logfile"]) for t in r["tests"] if t["status"] not in ("PASS", "SKIP", "CANCEL")]' | xargs cat
|
||||
- du -chs ${CI_PROJECT_DIR}/avocado-cache
|
||||
|
||||
build-system-ubuntu:
|
||||
|
@ -73,6 +74,7 @@ build-system-ubuntu:
|
|||
TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
|
||||
moxie-softmmu microblazeel-softmmu mips64el-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
CONFIGURE_ARGS: --enable-docs
|
||||
artifacts:
|
||||
expire_in: 2 days
|
||||
paths:
|
||||
|
@ -104,6 +106,7 @@ build-system-debian:
|
|||
TARGETS: arm-softmmu avr-softmmu i386-softmmu mipsel-softmmu
|
||||
riscv64-softmmu sh4eb-softmmu sparc-softmmu xtensaeb-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
CONFIGURE_ARGS: --enable-docs
|
||||
artifacts:
|
||||
expire_in: 2 days
|
||||
paths:
|
||||
|
@ -132,7 +135,7 @@ build-system-fedora:
|
|||
<<: *native_build_job_definition
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle
|
||||
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs
|
||||
TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu
|
||||
xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
@ -196,27 +199,84 @@ build-disabled:
|
|||
<<: *native_build_job_definition
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
CONFIGURE_ARGS: --disable-attr --disable-avx2 --disable-bochs
|
||||
--disable-brlapi --disable-bzip2 --disable-cap-ng --disable-capstone
|
||||
--disable-cloop --disable-coroutine-pool --disable-curl --disable-curses
|
||||
--disable-dmg --disable-docs --disable-glusterfs --disable-gnutls
|
||||
--disable-gtk --disable-guest-agent --disable-iconv --disable-kvm
|
||||
--disable-libiscsi --disable-libpmem --disable-libssh --disable-libusb
|
||||
--disable-libxml2 --disable-linux-aio --disable-live-block-migration
|
||||
--disable-lzo --disable-malloc-trim --disable-mpath --disable-nettle
|
||||
--disable-numa --disable-parallels --disable-pie --disable-qcow1
|
||||
--disable-qed --disable-qom-cast-debug --disable-rbd --disable-rdma
|
||||
--disable-replication --disable-sdl --disable-seccomp --disable-sheepdog
|
||||
--disable-slirp --disable-smartcard --disable-snappy --disable-spice
|
||||
--disable-strip --disable-tpm --disable-usb-redir --disable-vdi
|
||||
--disable-vhost-crypto --disable-vhost-net --disable-vhost-scsi
|
||||
--disable-vhost-user --disable-vhost-vdpa --disable-vhost-vsock
|
||||
--disable-virglrenderer --disable-vnc --disable-vte --disable-vvfat
|
||||
--disable-xen --disable-zstd
|
||||
CONFIGURE_ARGS:
|
||||
--disable-attr
|
||||
--disable-auth-pam
|
||||
--disable-avx2
|
||||
--disable-bochs
|
||||
--disable-brlapi
|
||||
--disable-bzip2
|
||||
--disable-cap-ng
|
||||
--disable-capstone
|
||||
--disable-cloop
|
||||
--disable-coroutine-pool
|
||||
--disable-curl
|
||||
--disable-curses
|
||||
--disable-dmg
|
||||
--disable-docs
|
||||
--disable-gcrypt
|
||||
--disable-glusterfs
|
||||
--disable-gnutls
|
||||
--disable-gtk
|
||||
--disable-guest-agent
|
||||
--disable-iconv
|
||||
--disable-keyring
|
||||
--disable-kvm
|
||||
--disable-libiscsi
|
||||
--disable-libpmem
|
||||
--disable-libssh
|
||||
--disable-libudev
|
||||
--disable-libusb
|
||||
--disable-libxml2
|
||||
--disable-linux-aio
|
||||
--disable-live-block-migration
|
||||
--disable-lzo
|
||||
--disable-malloc-trim
|
||||
--disable-mpath
|
||||
--disable-nettle
|
||||
--disable-numa
|
||||
--disable-opengl
|
||||
--disable-parallels
|
||||
--disable-pie
|
||||
--disable-qcow1
|
||||
--disable-qed
|
||||
--disable-qom-cast-debug
|
||||
--disable-rbd
|
||||
--disable-rdma
|
||||
--disable-replication
|
||||
--disable-sdl
|
||||
--disable-seccomp
|
||||
--disable-sheepdog
|
||||
--disable-slirp
|
||||
--disable-smartcard
|
||||
--disable-snappy
|
||||
--disable-sparse
|
||||
--disable-spice
|
||||
--disable-strip
|
||||
--disable-tpm
|
||||
--disable-usb-redir
|
||||
--disable-vdi
|
||||
--disable-vhost-crypto
|
||||
--disable-vhost-net
|
||||
--disable-vhost-scsi
|
||||
--disable-vhost-user
|
||||
--disable-vhost-vdpa
|
||||
--disable-vhost-vsock
|
||||
--disable-virglrenderer
|
||||
--disable-vnc
|
||||
--disable-vte
|
||||
--disable-vvfat
|
||||
--disable-xen
|
||||
--disable-zstd
|
||||
TARGETS: arm-softmmu i386-softmmu ppc64-softmmu mips64-softmmu
|
||||
s390x-softmmu i386-linux-user
|
||||
MAKE_CHECK_ARGS: check-qtest SPEED=slow
|
||||
|
||||
# This jobs explicitly disable TCG (--disable-tcg), KVM is detected by
|
||||
# the configure script. The container doesn't contain Xen headers so
|
||||
# Xen accelerator is not detected / selected. As result it build the
|
||||
# i386-softmmu and x86_64-softmmu with KVM being the single accelerator
|
||||
# available.
|
||||
build-tcg-disabled:
|
||||
<<: *native_build_job_definition
|
||||
variables:
|
||||
|
@ -224,7 +284,7 @@ build-tcg-disabled:
|
|||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --disable-tcg --audio-drv-list=""
|
||||
- ../configure --disable-tcg --audio-drv-list="" || { cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
- make -j"$JOBS"
|
||||
- make check-unit
|
||||
- make check-qapi-schema
|
||||
|
@ -232,7 +292,7 @@ build-tcg-disabled:
|
|||
- ./check -raw 001 002 003 004 005 008 009 010 011 012 021 025 032 033 048
|
||||
052 063 077 086 101 104 106 113 148 150 151 152 157 159 160 163
|
||||
170 171 183 184 192 194 197 208 215 221 222 226 227 236 253 277
|
||||
- ./check -qcow2 028 051 056 057 058 065 067 068 082 085 091 095 096 102 122
|
||||
- ./check -qcow2 028 051 056 057 058 065 068 082 085 091 095 096 102 122
|
||||
124 132 139 142 144 145 151 152 155 157 165 194 196 197 200 202
|
||||
208 209 215 216 218 222 227 234 246 247 248 250 254 255 257 258
|
||||
260 261 262 263 264 270 272 273 277 279
|
||||
|
@ -244,6 +304,22 @@ build-user:
|
|||
CONFIGURE_ARGS: --disable-tools --disable-system
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
build-user-static:
|
||||
<<: *native_build_job_definition
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --disable-tools --disable-system --static
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
# Only build the softmmu targets we have check-tcg tests for
|
||||
build-some-softmmu:
|
||||
<<: *native_build_job_definition
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --disable-tools --enable-debug-tcg
|
||||
TARGETS: xtensa-softmmu arm-softmmu aarch64-softmmu alpha-softmmu
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
# Run check-tcg against linux-user (with plugins)
|
||||
# we skip sparc64-linux-user until it has been fixed somewhat
|
||||
# we skip cris-linux-user as it doesn't use the common run loop
|
||||
|
@ -255,6 +331,14 @@ build-user-plugins:
|
|||
MAKE_CHECK_ARGS: check-tcg
|
||||
timeout: 1h 30m
|
||||
|
||||
build-some-softmmu-plugins:
|
||||
<<: *native_build_job_definition
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --disable-tools --disable-user --enable-plugins --enable-debug-tcg
|
||||
TARGETS: xtensa-softmmu arm-softmmu aarch64-softmmu alpha-softmmu
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
build-clang:
|
||||
<<: *native_build_job_definition
|
||||
variables:
|
||||
|
@ -303,7 +387,7 @@ build-oss-fuzz:
|
|||
| grep -v slirp); do
|
||||
grep "LLVMFuzzerTestOneInput" ${fuzzer} > /dev/null 2>&1 || continue ;
|
||||
echo Testing ${fuzzer} ... ;
|
||||
"${fuzzer}" -runs=1000 -seed=1 || exit 1 ;
|
||||
"${fuzzer}" -runs=1 -seed=1 || exit 1 ;
|
||||
done
|
||||
# Unrelated to fuzzer: run some tests with -fsanitize=address
|
||||
- cd build-oss-fuzz && make check-qtest-i386 check-unit
|
||||
|
@ -317,7 +401,7 @@ build-tci:
|
|||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-tcg-interpreter
|
||||
--target-list="$(for tg in $TARGETS; do echo -n ${tg}'-softmmu '; done)"
|
||||
--target-list="$(for tg in $TARGETS; do echo -n ${tg}'-softmmu '; done)" || { cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
- make -j"$JOBS"
|
||||
- make run-tcg-tests-x86_64-softmmu
|
||||
- make tests/qtest/boot-serial-test tests/qtest/cdrom-test tests/qtest/pxe-test
|
||||
|
@ -329,6 +413,22 @@ build-tci:
|
|||
- QTEST_QEMU_BINARY="./qemu-system-x86_64" ./tests/qtest/pxe-test
|
||||
- QTEST_QEMU_BINARY="./qemu-system-s390x" ./tests/qtest/pxe-test -m slow
|
||||
|
||||
# Alternate coroutines implementations are only really of interest to KVM users
|
||||
# However we can't test against KVM on Gitlab-CI so we can only run unit tests
|
||||
build-coroutine-ucontext:
|
||||
<<: *native_build_job_definition
|
||||
variables:
|
||||
IMAGE: ubuntu2004
|
||||
CONFIGURE_ARGS: --with-coroutine=ucontext --disable-tcg
|
||||
MAKE_CHECK_ARGS: check-unit
|
||||
|
||||
build-coroutine-sigaltstack:
|
||||
<<: *native_build_job_definition
|
||||
variables:
|
||||
IMAGE: ubuntu2004
|
||||
CONFIGURE_ARGS: --with-coroutine=sigaltstack --disable-tcg
|
||||
MAKE_CHECK_ARGS: check-unit
|
||||
|
||||
# Most jobs test latest gcrypt or nettle builds
|
||||
#
|
||||
# These jobs test old gcrypt and nettle from RHEL7
|
||||
|
@ -394,3 +494,68 @@ check-crypto-only-gnutls:
|
|||
variables:
|
||||
IMAGE: centos7
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
# We don't need to exercise every backend with every front-end
|
||||
build-trace-multi-user:
|
||||
<<: *native_build_job_definition
|
||||
variables:
|
||||
IMAGE: ubuntu2004
|
||||
CONFIGURE_ARGS: --enable-trace-backends=log,simple,syslog --disable-system
|
||||
|
||||
build-trace-ftrace-system:
|
||||
<<: *native_build_job_definition
|
||||
variables:
|
||||
IMAGE: ubuntu2004
|
||||
CONFIGURE_ARGS: --enable-trace-backends=ftrace --target-list=x86_64-softmmu
|
||||
|
||||
build-trace-ust-system:
|
||||
<<: *native_build_job_definition
|
||||
variables:
|
||||
IMAGE: ubuntu2004
|
||||
CONFIGURE_ARGS: --enable-trace-backends=ust --target-list=x86_64-softmmu
|
||||
|
||||
check-patch:
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/centos8:latest
|
||||
script: .gitlab-ci.d/check-patch.py
|
||||
except:
|
||||
variables:
|
||||
- $CI_PROJECT_NAMESPACE == 'qemu-project' && $CI_COMMIT_BRANCH == 'master'
|
||||
variables:
|
||||
GIT_DEPTH: 1000
|
||||
allow_failure: true
|
||||
|
||||
check-dco:
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/centos8:latest
|
||||
script: .gitlab-ci.d/check-dco.py
|
||||
except:
|
||||
variables:
|
||||
- $CI_PROJECT_NAMESPACE == 'qemu-project' && $CI_COMMIT_BRANCH == 'master'
|
||||
variables:
|
||||
GIT_DEPTH: 1000
|
||||
|
||||
build-libvhost-user:
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/fedora:latest
|
||||
before_script:
|
||||
- dnf install -y meson ninja-build
|
||||
script:
|
||||
- mkdir subprojects/libvhost-user/build
|
||||
- cd subprojects/libvhost-user/build
|
||||
- meson
|
||||
- ninja
|
||||
|
||||
pages:
|
||||
image: $CI_REGISTRY_IMAGE/qemu/ubuntu2004:latest
|
||||
stage: test
|
||||
needs:
|
||||
- job: build-system-ubuntu
|
||||
artifacts: true
|
||||
script:
|
||||
- mkdir public
|
||||
- mv build/docs/index.html public/
|
||||
- for i in devel interop specs system tools user ; do mv build/docs/$i public/ ; done
|
||||
artifacts:
|
||||
paths:
|
||||
- public
|
||||
|
|
2
.mailmap
2
.mailmap
|
@ -49,6 +49,8 @@ Anthony Liguori <anthony@codemonkey.ws> Anthony Liguori <aliguori@us.ibm.com>
|
|||
Filip Bozuta <filip.bozuta@syrmia.com> <filip.bozuta@rt-rk.com.com>
|
||||
Frederic Konrad <konrad@adacore.com> <fred.konrad@greensocs.com>
|
||||
Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
|
||||
Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com>
|
||||
Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
|
||||
James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
|
||||
Leif Lindholm <leif@nuviainc.com> <leif.lindholm@linaro.org>
|
||||
Radoslaw Biernacki <rad@semihalf.com> <radoslaw.biernacki@linaro.org>
|
||||
|
|
85
.travis.yml
85
.travis.yml
|
@ -49,6 +49,7 @@ addons:
|
|||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- libzstd-dev
|
||||
- ninja-build
|
||||
- sparse
|
||||
- uuid-dev
|
||||
- gcovr
|
||||
|
@ -94,7 +95,7 @@ before_install:
|
|||
# Configure step - may be overridden
|
||||
before_script:
|
||||
- mkdir -p ${BUILD_DIR} && cd ${BUILD_DIR}
|
||||
- ${SRC_DIR}/configure ${BASE_CONFIG} ${CONFIG} || { cat config.log && exit 1; }
|
||||
- ${SRC_DIR}/configure ${BASE_CONFIG} ${CONFIG} || { cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
|
||||
# Main build & test - rarely overridden - controlled by TEST_CMD
|
||||
script:
|
||||
|
@ -118,11 +119,6 @@ after_script:
|
|||
|
||||
jobs:
|
||||
include:
|
||||
- name: "GCC static (user)"
|
||||
env:
|
||||
- CONFIG="--disable-system --static"
|
||||
- CACHE_NAME="${TRAVIS_BRANCH}-linux-gcc-default"
|
||||
|
||||
# Just build tools and run minimal unit and softfloat checks
|
||||
- name: "GCC check-unit and check-softfloat"
|
||||
env:
|
||||
|
@ -152,20 +148,6 @@ jobs:
|
|||
- CACHE_NAME="${TRAVIS_BRANCH}-linux-gcc-default"
|
||||
|
||||
|
||||
# Alternate coroutines implementations are only really of interest to KVM users
|
||||
# However we can't test against KVM on Travis so we can only run unit tests
|
||||
- name: "check-unit coroutine=ucontext"
|
||||
env:
|
||||
- CONFIG="--with-coroutine=ucontext --disable-tcg"
|
||||
- TEST_CMD="make check-unit -j${JOBS} V=1"
|
||||
|
||||
|
||||
- name: "check-unit coroutine=sigaltstack"
|
||||
env:
|
||||
- CONFIG="--with-coroutine=sigaltstack --disable-tcg"
|
||||
- TEST_CMD="make check-unit -j${JOBS} V=1"
|
||||
|
||||
|
||||
# Check we can build docs and tools (out of tree)
|
||||
- name: "tools and docs (bionic)"
|
||||
dist: bionic
|
||||
|
@ -177,6 +159,7 @@ jobs:
|
|||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- ninja-build
|
||||
- python3-sphinx
|
||||
- perl
|
||||
|
||||
|
@ -197,7 +180,7 @@ jobs:
|
|||
compiler: clang
|
||||
before_script:
|
||||
- mkdir -p ${BUILD_DIR} && cd ${BUILD_DIR}
|
||||
- ${SRC_DIR}/configure ${CONFIG} --extra-cflags="-fsanitize=undefined -Werror" || { cat config.log && exit 1; }
|
||||
- ${SRC_DIR}/configure ${CONFIG} --extra-cflags="-fsanitize=undefined -Werror" || { cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
|
||||
|
||||
- name: "Clang (other-softmmu)"
|
||||
|
@ -211,6 +194,10 @@ jobs:
|
|||
# gprof/gcov are GCC features
|
||||
- name: "GCC gprof/gcov"
|
||||
dist: bionic
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- ninja-build
|
||||
env:
|
||||
- CONFIG="--enable-gprof --enable-gcov --disable-libssh
|
||||
--target-list=${MAIN_SOFTMMU_TARGETS}"
|
||||
|
@ -226,25 +213,6 @@ jobs:
|
|||
- TEST_CMD=""
|
||||
|
||||
|
||||
# We don't need to exercise every backend with every front-end
|
||||
- name: "GCC trace log,simple,syslog (user)"
|
||||
env:
|
||||
- CONFIG="--enable-trace-backends=log,simple,syslog --disable-system"
|
||||
- TEST_CMD=""
|
||||
|
||||
|
||||
- name: "GCC trace ftrace (x86_64-softmmu)"
|
||||
env:
|
||||
- CONFIG="--enable-trace-backends=ftrace --target-list=x86_64-softmmu"
|
||||
- TEST_CMD=""
|
||||
|
||||
|
||||
- name: "GCC trace ust (x86_64-softmmu)"
|
||||
env:
|
||||
- CONFIG="--enable-trace-backends=ust --target-list=x86_64-softmmu"
|
||||
- TEST_CMD=""
|
||||
|
||||
|
||||
# Using newer GCC with sanitizers
|
||||
- name: "GCC9 with sanitizers (softmmu)"
|
||||
dist: bionic
|
||||
|
@ -281,6 +249,7 @@ jobs:
|
|||
- liburcu-dev
|
||||
- libusb-1.0-0-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
- sparse
|
||||
- uuid-dev
|
||||
language: generic
|
||||
|
@ -291,35 +260,9 @@ jobs:
|
|||
- TEST_CMD=""
|
||||
before_script:
|
||||
- mkdir -p ${BUILD_DIR} && cd ${BUILD_DIR}
|
||||
- ${SRC_DIR}/configure ${CONFIG} --extra-cflags="-g3 -O0 -fsanitize=thread" || { cat config.log && exit 1; }
|
||||
- ${SRC_DIR}/configure ${CONFIG} --extra-cflags="-g3 -O0 -fsanitize=thread" || { cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
|
||||
|
||||
# Run check-tcg against linux-user
|
||||
- name: "GCC check-tcg (user)"
|
||||
env:
|
||||
- CONFIG="--disable-system --enable-debug-tcg"
|
||||
- TEST_BUILD_CMD="make build-tcg"
|
||||
- TEST_CMD="make check-tcg"
|
||||
- CACHE_NAME="${TRAVIS_BRANCH}-linux-gcc-debug-tcg"
|
||||
|
||||
|
||||
# Run check-tcg against softmmu targets
|
||||
- name: "GCC check-tcg (some-softmmu)"
|
||||
env:
|
||||
- CONFIG="--enable-debug-tcg --target-list=xtensa-softmmu,arm-softmmu,aarch64-softmmu,alpha-softmmu"
|
||||
- TEST_BUILD_CMD="make build-tcg"
|
||||
- TEST_CMD="make check-tcg"
|
||||
- CACHE_NAME="${TRAVIS_BRANCH}-linux-gcc-debug-tcg"
|
||||
|
||||
|
||||
# Run check-tcg against softmmu targets (with plugins)
|
||||
- name: "GCC plugins check-tcg (some-softmmu)"
|
||||
env:
|
||||
- CONFIG="--enable-plugins --enable-debug-tcg --target-list=xtensa-softmmu,arm-softmmu,aarch64-softmmu,alpha-softmmu"
|
||||
- TEST_BUILD_CMD="make build-tcg"
|
||||
- TEST_CMD="make check-tcg"
|
||||
- CACHE_NAME="${TRAVIS_BRANCH}-linux-gcc-debug-tcg"
|
||||
|
||||
- name: "[aarch64] GCC check-tcg"
|
||||
arch: arm64
|
||||
dist: focal
|
||||
|
@ -346,6 +289,7 @@ jobs:
|
|||
- libusb-1.0-0-dev
|
||||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
# Tests dependencies
|
||||
- genisoimage
|
||||
env:
|
||||
|
@ -379,6 +323,7 @@ jobs:
|
|||
- libusb-1.0-0-dev
|
||||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
# Tests dependencies
|
||||
- genisoimage
|
||||
env:
|
||||
|
@ -411,6 +356,7 @@ jobs:
|
|||
- libusb-1.0-0-dev
|
||||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
# Tests dependencies
|
||||
- genisoimage
|
||||
env:
|
||||
|
@ -450,6 +396,7 @@ jobs:
|
|||
- libzstd-dev
|
||||
- nettle-dev
|
||||
- xfslibs-dev
|
||||
- ninja-build
|
||||
# Tests dependencies
|
||||
- genisoimage
|
||||
env:
|
||||
|
@ -463,6 +410,7 @@ jobs:
|
|||
apt_packages:
|
||||
- libgcrypt20-dev
|
||||
- libgnutls28-dev
|
||||
- ninja-build
|
||||
env:
|
||||
- CONFIG="--disable-containers --disable-system"
|
||||
|
||||
|
@ -493,6 +441,7 @@ jobs:
|
|||
- libusb-1.0-0-dev
|
||||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
env:
|
||||
- TEST_CMD="make check-unit"
|
||||
- CONFIG="--disable-containers --disable-tcg --enable-kvm
|
||||
|
@ -517,7 +466,7 @@ jobs:
|
|||
- ls -l ${SRC_DIR}/qemu-${QEMU_VERSION}.tar.bz2
|
||||
- tar -xf ${SRC_DIR}/qemu-${QEMU_VERSION}.tar.bz2 && cd qemu-${QEMU_VERSION}
|
||||
- mkdir -p release-build && cd release-build
|
||||
- ../configure ${BASE_CONFIG} ${CONFIG} || { cat config.log && exit 1; }
|
||||
- ../configure ${BASE_CONFIG} ${CONFIG} || { cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
- make install
|
||||
allow_failures:
|
||||
- env: UNRELIABLE=true
|
||||
|
|
|
@ -85,8 +85,13 @@ Line width
|
|||
Lines should be 80 characters; try not to make them longer.
|
||||
|
||||
Sometimes it is hard to do, especially when dealing with QEMU subsystems
|
||||
that use long function or symbol names. Even in that case, do not make
|
||||
lines much longer than 80 characters.
|
||||
that use long function or symbol names. If wrapping the line at 80 columns
|
||||
is obviously less readable and more awkward, prefer not to wrap it; better
|
||||
to have an 85 character line than one which is awkwardly wrapped.
|
||||
|
||||
Even in that case, try not to make lines much longer than 80 characters.
|
||||
(The checkpatch script will warn at 100 characters, but this is intended
|
||||
as a guard against obviously-overlength lines, not a target.)
|
||||
|
||||
Rationale:
|
||||
|
||||
|
|
580
Changelog
580
Changelog
|
@ -1,580 +0,0 @@
|
|||
This file documents changes for QEMU releases 0.12 and earlier.
|
||||
For changelog information for later releases, see
|
||||
https://wiki.qemu.org/ChangeLog or look at the git history for
|
||||
more detailed information.
|
||||
|
||||
|
||||
version 0.12.0:
|
||||
|
||||
- Update to SeaBIOS 0.5.0
|
||||
- e1000: fix device link status in Linux (Anthony Liguori)
|
||||
- monitor: fix QMP for balloon command (Luiz Capitulino)
|
||||
- QMP: Return an empty dict by default (Luiz Capitulino)
|
||||
- QMP: Only handle converted commands (Luiz Capitulino)
|
||||
- pci: support PCI based option rom loading (Gerd Hoffman/Anthony Liguori)
|
||||
- Fix backcompat for hotplug of SCSI controllers (Daniel P. Berrange)
|
||||
- fdc: fix migration from 0.11 (Juan Quintela)
|
||||
- vmware-vga: fix segv on cursor resize. (Dave Airlie)
|
||||
- vmware-vga: various fixes (Dave Airlie/Anthony Liguori)
|
||||
- qdev: improve property error reporting. (Gerd Hoffmann)
|
||||
- fix vga names in default_list (Gerd Hoffmann)
|
||||
- usb-host: check mon before using it. (Gerd Hoffmann)
|
||||
- usb-net: use qdev for -usbdevice (Gerd Hoffmann)
|
||||
- monitor: Catch printing to non-existent monitor (Luiz Capitulino)
|
||||
- Avoid permanently disabled QEMU monitor when UNIX migration fails (Daniel P. Berrange)
|
||||
- Fix loading of ELF multiboot kernels (Kevin Wolf)
|
||||
- qemu-io: Fix memory leak (Kevin Wolf)
|
||||
- Fix thinko in linuxboot.S (Paolo Bonzini)
|
||||
- target-i386: Fix evaluation of DR7 register (Jan Kiszka)
|
||||
- vnc: hextile: do not generate ForegroundSpecified and SubrectsColoured tiles (Anthony Liguori)
|
||||
- S390: Bail out without KVM (Alexander Graf)
|
||||
- S390: Don't tell guest we're updating config space (Alexander Graf)
|
||||
- target-s390: Fail on unknown instructions (Alexander Graf)
|
||||
- osdep: Fix runtime failure on older Linux kernels (Andre Przywara)
|
||||
- Fix a make -j race (Juergen Lock)
|
||||
- target-alpha: Fix generic ctz64. (Richard Henderson)
|
||||
- s390: Fix buggy assignment (Stefan Weil)
|
||||
- target-mips: fix user-mode emulation startup (Nathan Froyd)
|
||||
- target-i386: Update CPUID feature set for TCG (Andre Przywara)
|
||||
- s390: fix build on 32 bit host (Michael S. Tsirkin)
|
||||
|
||||
version 0.12.0-rc2:
|
||||
|
||||
- v2: properly save kvm system time msr registers (Glauber Costa)
|
||||
- convert more monitor commands to qmp (Luiz Capitulino)
|
||||
- vnc: fix capslock tracking logic. (Gerd Hoffmann)
|
||||
- QemuOpts: allow larger option values. (Gerd Hoffmann)
|
||||
- scsi: fix drive hotplug. (Gerd Hoffmann)
|
||||
- pci: don't hw_error() when no slot is available. (Gerd Hoffmann)
|
||||
- pci: don't abort() when trying to hotplug with acpi off. (Gerd Hoffmann)
|
||||
- allow default devices to be implemented in config file (Gerd Hoffman)
|
||||
- vc: colorize chardev title line with blue background. (Gerd Hoffmann)
|
||||
- chardev: make chardevs specified in config file work. (Gerd Hoffmann)
|
||||
- qdev: also match bus name for global properties (Gerd Hoffmann)
|
||||
- qdev: add command line option to set global defaults for properties. (Gerd Hoffmann)
|
||||
- kvm: x86: Save/restore exception_index (Jan Kiszka)
|
||||
- qdev: Replace device names containing whitespace (Markus Armbruster)
|
||||
- fix rtc-td-hack on host without high-res timers (Gleb Natapov)
|
||||
- virtio: verify features on load (Michael S. Tsirkin)
|
||||
- vmware_vga: add rom file so that it boots. (Dave Airlie)
|
||||
- Do not abort on qemu_malloc(0) in production builds (Anthony Liguori)
|
||||
- Fix ARM userspace strex implementation. (Paul Brook)
|
||||
- qemu: delete rule target on error (Michael S. Tsirkin)
|
||||
- QMP: add human-readable description to error response (Markus Armbruster)
|
||||
- convert more monitor commands to QError (Markus Armbruster)
|
||||
- monitor: Fix double-prompt after "change vnc passwd BLA" (Markus Armbruster)
|
||||
- monitor: do_cont(): Don't ask for passwords (Luiz Capitulino)
|
||||
- monitor: Introduce 'block_passwd' command (Luiz Capitulino)
|
||||
- pci: interrupt disable bit support (Michael S. Tsirkin)
|
||||
- pci: interrupt status bit implementation (Michael S. Tsirkin)
|
||||
- pci: prepare irq code for interrupt state (Michael S. Tsirkin)
|
||||
- msix: function mask support (Michael S. Tsirkin)
|
||||
- msix: macro rename for function mask support (Michael S. Tsirkin)
|
||||
- cpuid: Fix multicore setup on Intel (Andre Przywara)
|
||||
- kvm: x86: Fix initial kvm_has_msr_star (Jan Kiszka)
|
||||
- Update OpenBIOS images to r640 (Aurelien Jarno)
|
||||
|
||||
version 0.10.2:
|
||||
|
||||
- fix savevm/loadvm (Anthony Liguori)
|
||||
- live migration: fix dirty tracking windows (Glauber Costa)
|
||||
- live migration: improve error propagation (Glauber Costa)
|
||||
- qcow2: fix image creation for > ~2TB images (Chris Wright)
|
||||
- hotplug: fix error handling for if= parameter (Eduardo Habkost)
|
||||
- qcow2: fix data corruption (Nolan Leake)
|
||||
- virtio: fix guest oops with 2.6.25 kernels (Rusty Russell)
|
||||
- SH4: add support for -kernel (Takashi Yoshii, Aurelien Jarno)
|
||||
- hotplug: fix closing of char devices (Jan Kiszka)
|
||||
- hotplug: remove incorrect check for device name (Eduardo Habkost)
|
||||
- enable -k on win32 (Herve Poussineau)
|
||||
- configure: use LANG=C for grep (Andreas Faerber)
|
||||
- fix VGA regression (malc)
|
||||
|
||||
version 0.10.1:
|
||||
|
||||
- virtio-net: check right return size on sg list (Alex Williamson)
|
||||
- Make qemu_announce_self handle holes (live migration after hotplug)
|
||||
(Marcelo Tosatti)
|
||||
- Revert r6804-r6808 (qcow2 allocation info). This series of changes added
|
||||
a high cost to startup for large qcow2 images (Anthony Liguori)
|
||||
- qemu-img: fix help message (Aurelien Jarno)
|
||||
- Fix build for non-default installs of SDL (Anthony Liguori)
|
||||
- Fix race condition in env->interrupt_request. When using TCG and a dynticks
|
||||
host timer, this condition could cause TCG to get stuck in an infinite
|
||||
loop (Aurelien Jarno)
|
||||
- Fix reading encrypted hard disk passwords during early startup (Jan Kiszka)
|
||||
- Fix encrypted disk reporting in 'info block' (Jan Kiszka)
|
||||
- Fix console size with tiny displays (MusicPal) (Jan Kiszka)
|
||||
- Improve error handling in bdrv_open2 (Jan Kiszka)
|
||||
- Avoid leaking data in mux'ed character devices (Jan Kiszka)
|
||||
- Fix initial character device reset (no banner in monitor) (Jan Kiszka)
|
||||
- Fix cpuid KVM crash on i386 host (Lubomir Rintel)
|
||||
- Fix SLES10sp2 installation by adding ISTAT1 register to LSI SCSI emulation
|
||||
(Ryan Harper)
|
||||
|
||||
version 0.10.0:
|
||||
|
||||
- TCG support (No longer requires GCC 3.x)
|
||||
- Kernel Virtual Machine acceleration support
|
||||
- BSD userspace emulation
|
||||
- Bluetooth emulation and host passthrough support
|
||||
- GDB XML register description support
|
||||
- Intel e1000 emulation
|
||||
- HPET emulation
|
||||
- VirtIO paravirtual device support
|
||||
- Marvell 88w8618 / MusicPal emulation
|
||||
- Nokia N-series tablet emulation / OMAP2 processor emulation
|
||||
- PCI hotplug support
|
||||
- Live migration and new save/restore formats
|
||||
- Curses display support
|
||||
- qemu-nbd utility to mount supported block formats
|
||||
- Altivec support in PPC emulation and new firmware (OpenBIOS)
|
||||
- Multiple VNC clients are now supported
|
||||
- TLS encryption is now supported in VNC
|
||||
- MIPS Magnum R4000 machine (Hervé Poussineau)
|
||||
- Braille support (Samuel Thibault)
|
||||
- Freecom MusicPal system emulation (Jan Kiszka)
|
||||
- OMAP242x and Nokia N800, N810 machines (Andrzej Zaborowski)
|
||||
- EsounD audio driver (Frederick Reeve)
|
||||
- Gravis Ultrasound GF1 sound card (Tibor "TS" Schütz)
|
||||
- Many, many, bug fixes and new features
|
||||
|
||||
version 0.9.1:
|
||||
|
||||
- TFTP booting from host directory (Anthony Liguori, Erwan Velu)
|
||||
- Tap device emulation for Solaris (Sittichai Palanisong)
|
||||
- Monitor multiplexing to several I/O channels (Jason Wessel)
|
||||
- ds1225y nvram support (Herve Poussineau)
|
||||
- CPU model selection support (J. Mayer, Paul Brook, Herve Poussineau)
|
||||
- Several Sparc fixes (Aurelien Jarno, Blue Swirl, Robert Reif)
|
||||
- MIPS 64-bit FPU support (Thiemo Seufer)
|
||||
- Xscale PDA emulation (Andrzej Zaborowski)
|
||||
- ColdFire system emulation (Paul Brook)
|
||||
- Improved SH4 support (Magnus Damm)
|
||||
- MIPS64 support (Aurelien Jarno, Thiemo Seufer)
|
||||
- Preliminary Alpha guest support (J. Mayer)
|
||||
- Read-only support for Parallels disk images (Alex Beregszaszi)
|
||||
- SVM (x86 virtualization) support (Alexander Graf)
|
||||
- CRIS emulation (Edgar E. Iglesias)
|
||||
- SPARC32PLUS execution support (Blue Swirl)
|
||||
- MIPS mipssim pseudo machine (Thiemo Seufer)
|
||||
- Strace for Linux userland emulation (Stuart Anderson, Thayne Harbaugh)
|
||||
- OMAP310 MPU emulation plus Palm T|E machine (Andrzej Zaborowski)
|
||||
- ARM v6, v7, NEON SIMD and SMP emulation (Paul Brook/CodeSourcery)
|
||||
- Gumstix boards: connex and verdex emulation (Thorsten Zitterell)
|
||||
- Intel mainstone II board emulation (Armin Kuster)
|
||||
- VMware SVGA II graphics card support (Andrzej Zaborowski)
|
||||
|
||||
version 0.9.0:
|
||||
|
||||
- Support for relative paths in backing files for disk images
|
||||
- Async file I/O API
|
||||
- New qcow2 disk image format
|
||||
- Support of multiple VM snapshots
|
||||
- Linux: specific host CDROM and floppy support
|
||||
- SMM support
|
||||
- Moved PCI init, MP table init and ACPI table init to Bochs BIOS
|
||||
- Support for MIPS32 Release 2 instruction set (Thiemo Seufer)
|
||||
- MIPS Malta system emulation (Aurelien Jarno, Stefan Weil)
|
||||
- Darwin userspace emulation (Pierre d'Herbemont)
|
||||
- m68k user support (Paul Brook)
|
||||
- several x86 and x86_64 emulation fixes
|
||||
- Mouse relative offset VNC extension (Anthony Liguori)
|
||||
- PXE boot support (Anthony Liguori)
|
||||
- '-daemonize' option (Anthony Liguori)
|
||||
|
||||
version 0.8.2:
|
||||
|
||||
- ACPI support
|
||||
- PC VGA BIOS fixes
|
||||
- switch to OpenBios for SPARC targets (Blue Swirl)
|
||||
- VNC server fixes
|
||||
- MIPS FPU support (Marius Groeger)
|
||||
- Solaris/SPARC host support (Juergen Keil)
|
||||
- PPC breakpoints and single stepping (Jason Wessel)
|
||||
- USB updates (Paul Brook)
|
||||
- UDP/TCP/telnet character devices (Jason Wessel)
|
||||
- Windows sparse file support (Frediano Ziglio)
|
||||
- RTL8139 NIC TCP segmentation offloading (Igor Kovalenko)
|
||||
- PCNET NIC support (Antony T Curtis)
|
||||
- Support for variable frequency host CPUs
|
||||
- Workaround for win32 SMP hosts
|
||||
- Support for AMD Flash memories (Jocelyn Mayer)
|
||||
- Audio capture to WAV files support (malc)
|
||||
|
||||
version 0.8.1:
|
||||
|
||||
- USB tablet support (Brad Campbell, Anthony Liguori)
|
||||
- win32 host serial support (Kazu)
|
||||
- PC speaker support (Joachim Henke)
|
||||
- IDE LBA48 support (Jens Axboe)
|
||||
- SSE3 support
|
||||
- Solaris port (Juergen Keil)
|
||||
- Preliminary SH4 target (Samuel Tardieu)
|
||||
- VNC server (Anthony Liguori)
|
||||
- slirp fixes (Ed Swierk et al.)
|
||||
- USB fixes
|
||||
- ARM Versatile Platform Baseboard emulation (Paul Brook)
|
||||
|
||||
version 0.8.0:
|
||||
|
||||
- ARM system emulation: Arm Integrator/CP board with an arm1026ej-s
|
||||
cpu (Paul Brook)
|
||||
- SMP support
|
||||
- Mac OS X cocoa improvements (Mike Kronenberg)
|
||||
- Mac OS X CoreAudio driver (Mike Kronenberg)
|
||||
- DirectSound driver (malc)
|
||||
- ALSA audio driver (malc)
|
||||
- new audio options: '-soundhw' and '-audio-help' (malc)
|
||||
- ES1370 PCI audio device (malc)
|
||||
- Initial USB support
|
||||
- Linux host serial port access
|
||||
- Linux host low level parallel port access
|
||||
- New network emulation code supporting VLANs.
|
||||
- MIPS and MIPSel User Linux emulation
|
||||
- MIPS fixes to boot Linux (Daniel Jacobowitz)
|
||||
- NX bit support
|
||||
- Initial SPARC SMP support (Blue Swirl)
|
||||
- Major overhaul of the virtual FAT driver for read/write support
|
||||
(Johannes Schindelin)
|
||||
|
||||
version 0.7.2:
|
||||
|
||||
- x86_64 fixes (Win2000 and Linux 2.6 boot in 32 bit)
|
||||
- merge self modifying code handling in dirty ram page mechanism.
|
||||
- MIPS fixes (Ralf Baechle)
|
||||
- better user net performances
|
||||
|
||||
version 0.7.1:
|
||||
|
||||
- read-only Virtual FAT support (Johannes Schindelin)
|
||||
- Windows 2000 install disk full hack (original idea from Vladimir
|
||||
N. Oleynik)
|
||||
- VMDK disk image creation (Filip Navara)
|
||||
- SPARC64 progress (Blue Swirl)
|
||||
- initial MIPS support (Jocelyn mayer)
|
||||
- MIPS improvements (Ralf Baechle)
|
||||
- 64 bit fixes in user networking (initial patch by Gwenole Beauchesne)
|
||||
- IOAPIC support (Filip Navara)
|
||||
|
||||
version 0.7.0:
|
||||
|
||||
- better BIOS translation and HDD geometry auto-detection
|
||||
- user mode networking bug fix
|
||||
- undocumented FPU ops support
|
||||
- Cirrus VGA: support for 1280x1024x[8,15,16] modes
|
||||
- 'pidfile' option
|
||||
- .dmg disk image format support (Johannes Schindelin)
|
||||
- keymaps support (initial patch by Johannes Schindelin)
|
||||
- big endian ARM support (Lennert Buytenhek)
|
||||
- added generic 64 bit target support
|
||||
- x86_64 target support
|
||||
- initial APIC support
|
||||
- MMX/SSE/SSE2/PNI support
|
||||
- PC parallel port support (Mark Jonckheere)
|
||||
- initial SPARC64 support (Blue Swirl)
|
||||
- SPARC target boots Linux (Blue Swirl)
|
||||
- armv5te user mode support (Paul Brook)
|
||||
- ARM VFP support (Paul Brook)
|
||||
- ARM "Angel" semihosting syscalls (Paul Brook)
|
||||
- user mode gdb stub support (Paul Brook)
|
||||
- Samba 3 support
|
||||
- initial Cocoa support (Pierre d'Herbemont)
|
||||
- generic FPU emulation code
|
||||
- Virtual PC read-only disk image support (Alex Beregszaszi)
|
||||
|
||||
version 0.6.1:
|
||||
|
||||
- Mac OS X port (Pierre d'Herbemont)
|
||||
- Virtual console support
|
||||
- Better monitor line edition
|
||||
- New block device layer
|
||||
- New 'qcow' growable disk image support with AES encryption and
|
||||
transparent decompression
|
||||
- VMware 3 and 4 read-only disk image support (untested)
|
||||
- Support for up to 4 serial ports
|
||||
- TFTP server support (Magnus Damm)
|
||||
- Port redirection support in user mode networking
|
||||
- Support for not executable data sections
|
||||
- Compressed loop disk image support (Johannes Schindelin)
|
||||
- Level triggered IRQ fix (aka NE2000 PCI performance fix) (Steve
|
||||
Wormley)
|
||||
- Fixed Fedora Core 2 problems (now you can run qemu without any
|
||||
LD_ASSUME_KERNEL tricks on FC2)
|
||||
- DHCP fix for Windows (accept DHCPREQUEST alone)
|
||||
- SPARC system emulation (Blue Swirl)
|
||||
- Automatic Samba configuration for host file access from Windows.
|
||||
- '-loadvm' and '-full-screen' options
|
||||
- ne2000 savevm support (Johannes Schindelin)
|
||||
- Ctrl-Alt is now the default grab key. Ctrl-Alt-[0-9] switches to
|
||||
the virtual consoles.
|
||||
- BIOS floppy fix for NT4 (Mike Nordell, Derek Fawcus, Volker Ruppert)
|
||||
- Floppy fixes for NT4 and NT5 (Mike Nordell)
|
||||
- NT4 IDE fixes (Ben Pfaf, Mike Nordell)
|
||||
- SDL Audio support and SB16 fixes (malc)
|
||||
- ENTER instruction bug fix (initial patch by Stefan Kisdaroczi)
|
||||
- VGA font change fix
|
||||
- VGA read-only CRTC register fix
|
||||
|
||||
version 0.6.0:
|
||||
|
||||
- minimalist FPU exception support (NetBSD FPU probe fix)
|
||||
- cr0.ET fix (Win95 boot)
|
||||
- *BSD port (Markus Niemisto)
|
||||
- I/O access fix (signaled by Mark Jonckheere)
|
||||
- IDE drives serial number fix (Mike Nordell)
|
||||
- int13 CDROM BIOS fix (aka Solaris x86 install CD fix)
|
||||
- int15, ah=86 BIOS fix (aka Solaris x86 hardware probe hang up fix)
|
||||
- BSR/BSF "undefined behaviour" fix
|
||||
- vmdk2raw: convert VMware disk images to raw images
|
||||
- PCI support
|
||||
- NE2K PCI support
|
||||
- dummy VGA PCI support
|
||||
- VGA font selection fix (Daniel Serpell)
|
||||
- PIC reset fix (Hidemi KAWAI)
|
||||
- PIC spurious irq support (aka Solaris install bug)
|
||||
- added '-localtime' option
|
||||
- Cirrus CL-GD54xx VGA support (initial patch by Makoto Suzuki (suzu))
|
||||
- APM and system shutdown support
|
||||
- Fixed system reset
|
||||
- Support for other PC BIOSes
|
||||
- Initial PowerMac hardware emulation
|
||||
- PowerMac/PREP OpenFirmware compatible BIOS (Jocelyn Mayer)
|
||||
- initial IDE BMDMA support (needed for Darwin x86)
|
||||
- Set the default memory size for PC emulation to 128 MB
|
||||
|
||||
version 0.5.5:
|
||||
|
||||
- SDL full screen support (initial patch by malc)
|
||||
- VGA support on PowerPC PREP
|
||||
- VBE fixes (Matthew Mastracci)
|
||||
- PIT fixes (aka Win98 hardware probe and "VGA slowness" bug)
|
||||
- IDE master only fixes (aka Win98 CD-ROM probe bug)
|
||||
- ARM load/store half word fix (Ulrich Hecht)
|
||||
- FDC fixes for Win98
|
||||
|
||||
version 0.5.4:
|
||||
|
||||
- qemu-fast fixes
|
||||
- BIOS area protection fix (aka EMM386.EXE fix) (Mike Nordell)
|
||||
- keyboard/mouse fix (Mike Nordell)
|
||||
- IDE fixes (Linux did not recognized slave drivers)
|
||||
- VM86 EIP masking fix (aka NT5 install fix) (Mike Nordell)
|
||||
- QEMU can now boot a PowerPC Linux kernel (Jocelyn Mayer)
|
||||
- User mode network stack
|
||||
- imul imm8 fix + 0x82 opcode support (Hidemi KAWAI)
|
||||
- precise self modifying code (aka BeOS install bug)
|
||||
|
||||
version 0.5.3:
|
||||
|
||||
- added Bochs VESA VBE support
|
||||
- VGA memory map mode 3 access fix (OS/2 install fix)
|
||||
- IDE fixes (Jens Axboe)
|
||||
- CPU interrupt fixes
|
||||
- fixed various TLB invalidation cases (NT install)
|
||||
- fixed cr0.WP semantics (XP install)
|
||||
- direct chaining support for SPARC and PowerPC (faster)
|
||||
- ARM NWFPE support (initial patch by Ulrich Hecht)
|
||||
- added specific x86 to x86 translator (close to native performance
|
||||
in qemu-i386 and qemu-fast)
|
||||
- shm syscalls support (Paul McKerras)
|
||||
- added accurate CR0.MP/ME/TS emulation
|
||||
- fixed DMA memory write access (Win95 boot floppy fix)
|
||||
- graphical x86 linux loader
|
||||
- command line monitor
|
||||
- generic removable device support
|
||||
- support of CD-ROM change
|
||||
- multiple network interface support
|
||||
- initial x86-64 host support (Gwenole Beauchesne)
|
||||
- lret to outer privilege fix (OS/2 install fix)
|
||||
- task switch fixes (SkyOS boot)
|
||||
- VM save/restore commands
|
||||
- new timer API
|
||||
- more precise RTC emulation (periodic timers + time updates)
|
||||
- Win32 port (initial patch by Kazu)
|
||||
|
||||
version 0.5.2:
|
||||
|
||||
- improved soft MMU speed (assembly functions and specializing)
|
||||
- improved multitasking speed by avoiding flushing TBs when
|
||||
switching tasks
|
||||
- improved qemu-fast speed
|
||||
- improved self modifying code handling (big performance gain in
|
||||
softmmu mode).
|
||||
- fixed IO checking
|
||||
- fixed CD-ROM detection (win98 install CD)
|
||||
- fixed addseg real mode bug (GRUB boot fix)
|
||||
- added ROM memory support (win98 boot)
|
||||
- fixed 'call Ev' in case of paging exception
|
||||
- updated the script 'qemu-binfmt-conf.sh' to use QEMU automagically
|
||||
when launching executables for the supported target CPUs.
|
||||
- PowerPC system emulation update (Jocelyn Mayer)
|
||||
- PC floppy emulation and DMA fixes (Jocelyn Mayer)
|
||||
- polled mode for PIC (Jocelyn Mayer)
|
||||
- fixed PTE dirty bit handling
|
||||
- fixed xadd same reg bug
|
||||
- fixed cmpxchg exception safeness
|
||||
- access to virtual memory in gdb stub
|
||||
- task gate and NT flag fixes
|
||||
- eflags optimisation fix for string operations
|
||||
|
||||
version 0.5.1:
|
||||
|
||||
- float access fixes when using soft mmu
|
||||
- PC emulation support on PowerPC
|
||||
- A20 support
|
||||
- IDE CD-ROM emulation
|
||||
- ARM fixes (Ulrich Hecht)
|
||||
- SB16 emulation (malc)
|
||||
- IRET and INT fixes in VM86 mode with IOPL=3
|
||||
- Port I/Os use TSS io map
|
||||
- Full task switching/task gate support
|
||||
- added verr, verw, arpl, fcmovxx
|
||||
- PowerPC target support (Jocelyn Mayer)
|
||||
- Major SPARC target fixes (dynamically linked programs begin to work)
|
||||
|
||||
version 0.5.0:
|
||||
|
||||
- full hardware level VGA emulation
|
||||
- graphical display with SDL
|
||||
- added PS/2 mouse and keyboard emulation
|
||||
- popw (%esp) fix
|
||||
- mov to/from segment data width fix
|
||||
- added real mode support
|
||||
- added Bochs BIOS and LGPL'ed VGA BIOS loader in qemu
|
||||
- m68k host port (Richard Zidlicky)
|
||||
- partial soft MMU support for memory mapped I/Os
|
||||
- multi-target build
|
||||
- fixed: no error code in hardware interrupts
|
||||
- fixed: pop ss, mov ss, x and sti disable hardware irqs for the next insn
|
||||
- correct single stepping through string operations
|
||||
- preliminary SPARC target support (Thomas M. Ogrisegg)
|
||||
- tun-fd option (Rusty Russell)
|
||||
- automatic IDE geometry detection
|
||||
- renamed 'vl' to qemu[-fast] and user qemu to qemu-{cpu}.
|
||||
- added man page
|
||||
- added full soft mmu mode to launch unpatched OSes.
|
||||
|
||||
version 0.4.3:
|
||||
|
||||
- x86 exception fix in case of nop instruction.
|
||||
- gcc 3.2.2 bug workaround (RedHat 9 fix)
|
||||
- sparc and Alpha host fixes
|
||||
- many ARM target fixes: 'ls' and 'bash' can be launched.
|
||||
|
||||
version 0.4.2:
|
||||
|
||||
- many exception handling fixes (can compile a Linux kernel inside vl)
|
||||
- IDE emulation support
|
||||
- initial GDB stub support
|
||||
- deferred update support for disk images (Rusty Russell)
|
||||
- accept User Mode Linux Copy On Write disk images
|
||||
- SMP kernels can at least be booted
|
||||
|
||||
version 0.4.1:
|
||||
|
||||
- more accurate timer support in vl.
|
||||
- more reliable NE2000 probe in vl.
|
||||
- added 2.5.66 kernel in vl-test.
|
||||
- added VLTMPDIR environment variable in vl.
|
||||
|
||||
version 0.4:
|
||||
|
||||
- initial support for ring 0 x86 processor emulation
|
||||
- fixed signal handling for correct dosemu DPMI emulation
|
||||
- fast x86 MMU emulation with mmap()
|
||||
- fixed popl (%esp) case
|
||||
- Linux kernel can be executed by QEMU with the 'vl' command.
|
||||
|
||||
version 0.3:
|
||||
|
||||
- initial support for ARM emulation
|
||||
- added fnsave, frstor, fnstenv, fldenv FPU instructions
|
||||
- added FPU register save in signal emulation
|
||||
- initial ARM port
|
||||
- Sparc and Alpha ports work on the regression test
|
||||
- generic ioctl number conversion
|
||||
- fixed ioctl type conversion
|
||||
|
||||
version 0.2:
|
||||
|
||||
- PowerPC disassembly and ELF symbols output (Rusty Russell)
|
||||
- flock support (Rusty Russell)
|
||||
- ugetrlimit support (Rusty Russell)
|
||||
- fstat64 fix (Rusty Russell)
|
||||
- initial Alpha port (Falk Hueffner)
|
||||
- initial IA64 port (Matt Wilson)
|
||||
- initial Sparc and Sparc64 port (David S. Miller)
|
||||
- added HLT instruction
|
||||
- LRET instruction fix.
|
||||
- added GPF generation for I/Os.
|
||||
- added INT3 and TF flag support.
|
||||
- SHL instruction C flag fix.
|
||||
- mmap emulation for host page size > 4KB
|
||||
- self-modifying code support
|
||||
- better VM86 support (dosemu works on non trivial programs)
|
||||
- precise exception support (EIP is computed correctly in most cases)
|
||||
- more precise LDT/GDT/IDT emulation
|
||||
- faster segment load in vm86 mode
|
||||
- direct chaining of basic blocks (faster emulation)
|
||||
|
||||
version 0.1.6:
|
||||
|
||||
- automatic library search system. QEMU can now work with unpatched
|
||||
ELF dynamic loader and libc (Rusty Russell).
|
||||
- ISO C warning fixes (Alistair Strachan)
|
||||
- first self-virtualizable version (works only as long as the
|
||||
translation cache is not flushed)
|
||||
- RH9 fixes
|
||||
|
||||
version 0.1.5:
|
||||
|
||||
- ppc64 support + personality() patch (Rusty Russell)
|
||||
- first Alpha CPU patches (Falk Hueffner)
|
||||
- removed bfd.h dependency
|
||||
- fixed shrd, shld, idivl and divl on PowerPC.
|
||||
- fixed buggy glibc PowerPC rint() function (test-i386 passes now on PowerPC).
|
||||
|
||||
version 0.1.4:
|
||||
|
||||
- more accurate VM86 emulation (can launch small DOS 16 bit
|
||||
executables in wine).
|
||||
- fixed push/pop fs/gs
|
||||
- added iret instruction.
|
||||
- added times() syscall and SIOCATMARK ioctl.
|
||||
|
||||
version 0.1.3:
|
||||
|
||||
- S390 support (Ulrich Weigand)
|
||||
- glibc 2.3.x compile fix (Ulrich Weigand)
|
||||
- socketcall endian fix (Ulrich Weigand)
|
||||
- struct sockaddr endian fix (Ulrich Weigand)
|
||||
- sendmsg/recvmsg endian fix (Ulrich Weigand)
|
||||
- execve endian fix (Ulrich Weigand)
|
||||
- fdset endian fix (Ulrich Weigand)
|
||||
- partial setsockopt syscall support (Ulrich Weigand)
|
||||
- more accurate pushf/popf emulation
|
||||
- first partial vm86() syscall support (can be used with runcom example).
|
||||
- added bound, cmpxchg8b, cpuid instructions
|
||||
- added 16 bit addressing support/override for string operations
|
||||
- poll() fix
|
||||
|
||||
version 0.1.2:
|
||||
|
||||
- compile fixes
|
||||
- xlat instruction
|
||||
- xchg instruction memory lock
|
||||
- added simple vm86 example (not working with QEMU yet). The 54 byte
|
||||
DOS executable 'pi_10.com' program was released by Bertram
|
||||
Felgenhauer (more information at http://www.boo.net/~jasonp/pipage.html).
|
||||
|
||||
version 0.1.1:
|
||||
|
||||
- glibc 2.2 compilation fixes
|
||||
- added -s and -L options
|
||||
- binary distribution of x86 glibc and wine
|
||||
- big endian fixes in ELF loader and getdents.
|
||||
|
||||
version 0.1:
|
||||
|
||||
- initial public release.
|
206
MAINTAINERS
206
MAINTAINERS
|
@ -112,12 +112,11 @@ L: qemu-s390x@nongnu.org
|
|||
Guest CPU cores (TCG)
|
||||
---------------------
|
||||
Overall TCG CPUs
|
||||
M: Richard Henderson <rth@twiddle.net>
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
R: Paolo Bonzini <pbonzini@redhat.com>
|
||||
S: Maintained
|
||||
F: softmmu/cpus.c
|
||||
F: cpus-common.c
|
||||
F: exec.c
|
||||
F: accel/tcg/
|
||||
F: accel/stubs/tcg-stub.c
|
||||
F: scripts/decodetree.py
|
||||
|
@ -139,7 +138,7 @@ F: include/fpu/
|
|||
F: tests/fp/
|
||||
|
||||
Alpha TCG CPUs
|
||||
M: Richard Henderson <rth@twiddle.net>
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
S: Maintained
|
||||
F: target/alpha/
|
||||
F: tests/tcg/alpha/
|
||||
|
@ -159,6 +158,7 @@ F: disas/arm.c
|
|||
F: disas/arm-a64.cc
|
||||
F: disas/libvixl/
|
||||
F: docs/system/target-arm.rst
|
||||
F: docs/system/arm/cpu-features.rst
|
||||
|
||||
ARM SMMU
|
||||
M: Eric Auger <eric.auger@redhat.com>
|
||||
|
@ -186,7 +186,7 @@ F: tests/tcg/cris/
|
|||
F: disas/cris.c
|
||||
|
||||
HPPA (PA-RISC) TCG CPUs
|
||||
M: Richard Henderson <rth@twiddle.net>
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
S: Maintained
|
||||
F: target/hppa/
|
||||
F: hw/hppa/
|
||||
|
@ -221,14 +221,14 @@ F: hw/microblaze/
|
|||
F: disas/microblaze.c
|
||||
|
||||
MIPS TCG CPUs
|
||||
M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>
|
||||
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
|
||||
R: Aurelien Jarno <aurelien@aurel32.net>
|
||||
R: Jiaxun Yang <jiaxun.yang@flygoat.com>
|
||||
R: Aleksandar Rikalo <aleksandar.rikalo@syrmia.com>
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: target/mips/
|
||||
F: default-configs/*mips*
|
||||
F: disas/*mips*
|
||||
F: disas/mips.c
|
||||
F: docs/system/cpu-models-mips.rst.inc
|
||||
F: hw/intc/mips_gic.c
|
||||
F: hw/mips/
|
||||
|
@ -238,11 +238,13 @@ F: include/hw/intc/mips_gic.h
|
|||
F: include/hw/mips/
|
||||
F: include/hw/misc/mips_*
|
||||
F: include/hw/timer/mips_gictimer.h
|
||||
F: tests/acceptance/linux_ssh_mips_malta.py
|
||||
F: tests/acceptance/machine_mips_malta.py
|
||||
F: tests/tcg/mips/
|
||||
K: ^Subject:.*(?i)mips
|
||||
|
||||
MIPS TCG CPUs (nanoMIPS ISA)
|
||||
S: Orphan
|
||||
F: disas/nanomips.*
|
||||
|
||||
Moxie TCG CPUs
|
||||
M: Anthony Green <green@moxielogic.com>
|
||||
S: Maintained
|
||||
|
@ -257,7 +259,6 @@ M: Marek Vasut <marex@denx.de>
|
|||
S: Maintained
|
||||
F: target/nios2/
|
||||
F: hw/nios2/
|
||||
F: hw/intc/nios2_iic.c
|
||||
F: disas/nios2.c
|
||||
F: default-configs/nios2-softmmu.mak
|
||||
|
||||
|
@ -270,6 +271,7 @@ F: tests/tcg/openrisc/
|
|||
|
||||
PowerPC TCG CPUs
|
||||
M: David Gibson <david@gibson.dropbear.id.au>
|
||||
M: Greg Kurz <groug@kaod.org>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Maintained
|
||||
F: target/ppc/
|
||||
|
@ -292,11 +294,11 @@ F: linux-user/host/riscv64/
|
|||
|
||||
RENESAS RX CPUs
|
||||
M: Yoshinori Sato <ysato@users.sourceforge.jp>
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: target/rx/
|
||||
|
||||
S390 TCG CPUs
|
||||
M: Richard Henderson <rth@twiddle.net>
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
M: David Hildenbrand <david@redhat.com>
|
||||
S: Maintained
|
||||
F: target/s390x/
|
||||
|
@ -332,7 +334,7 @@ F: include/hw/unicore32/
|
|||
|
||||
X86 TCG CPUs
|
||||
M: Paolo Bonzini <pbonzini@redhat.com>
|
||||
M: Richard Henderson <rth@twiddle.net>
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
M: Eduardo Habkost <ehabkost@redhat.com>
|
||||
S: Maintained
|
||||
F: target/i386/
|
||||
|
@ -341,7 +343,7 @@ F: tests/tcg/x86_64/
|
|||
F: hw/i386/
|
||||
F: disas/i386.c
|
||||
F: docs/system/cpu-models-x86.rst.inc
|
||||
T: git https://github.com/ehabkost/qemu.git x86-next
|
||||
T: git https://gitlab.com/ehabkost/qemu.git x86-next
|
||||
|
||||
Xtensa TCG CPUs
|
||||
M: Max Filippov <jcmvbkbc@gmail.com>
|
||||
|
@ -386,13 +388,13 @@ S: Maintained
|
|||
F: target/arm/kvm.c
|
||||
|
||||
MIPS KVM CPUs
|
||||
M: Huacai Chen <chenhc@lemote.com>
|
||||
M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>
|
||||
M: Huacai Chen <chenhuacai@kernel.org>
|
||||
S: Odd Fixes
|
||||
F: target/mips/kvm.c
|
||||
|
||||
PPC KVM CPUs
|
||||
M: David Gibson <david@gibson.dropbear.id.au>
|
||||
M: Greg Kurz <groug@kaod.org>
|
||||
S: Maintained
|
||||
F: target/ppc/kvm.c
|
||||
|
||||
|
@ -424,13 +426,13 @@ M: Paolo Bonzini <pbonzini@redhat.com>
|
|||
M: Marcelo Tosatti <mtosatti@redhat.com>
|
||||
L: kvm@vger.kernel.org
|
||||
S: Supported
|
||||
F: target/i386/kvm.c
|
||||
F: target/i386/kvm/
|
||||
F: scripts/kvm/vmxcap
|
||||
|
||||
Guest CPU Cores (other accelerators)
|
||||
------------------------------------
|
||||
Overall
|
||||
M: Richard Henderson <rth@twiddle.net>
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
R: Paolo Bonzini <pbonzini@redhat.com>
|
||||
S: Maintained
|
||||
F: include/sysemu/accel.h
|
||||
|
@ -443,17 +445,13 @@ M: Cameron Esfahani <dirty@apple.com>
|
|||
M: Roman Bolshakov <r.bolshakov@yadro.com>
|
||||
W: https://wiki.qemu.org/Features/HVF
|
||||
S: Maintained
|
||||
F: accel/stubs/hvf-stub.c
|
||||
F: target/i386/hvf/
|
||||
F: include/sysemu/hvf.h
|
||||
|
||||
WHPX CPUs
|
||||
M: Sunil Muthuswamy <sunilmut@microsoft.com>
|
||||
S: Supported
|
||||
F: target/i386/whpx-all.c
|
||||
F: target/i386/whpx-cpus.c
|
||||
F: target/i386/whp-dispatch.h
|
||||
F: accel/stubs/whpx-stub.c
|
||||
F: target/i386/whpx/
|
||||
F: include/sysemu/whpx.h
|
||||
|
||||
Guest CPU Cores (Xen)
|
||||
|
@ -493,7 +491,7 @@ W: https://github.com/intel/haxm/issues
|
|||
S: Maintained
|
||||
F: accel/stubs/hax-stub.c
|
||||
F: include/sysemu/hax.h
|
||||
F: target/i386/hax-*
|
||||
F: target/i386/hax/
|
||||
|
||||
Hosts
|
||||
-----
|
||||
|
@ -534,7 +532,7 @@ F: qemu.nsi
|
|||
|
||||
Alpha Machines
|
||||
--------------
|
||||
M: Richard Henderson <rth@twiddle.net>
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
S: Maintained
|
||||
F: hw/alpha/
|
||||
F: hw/isa/smc37c669-superio.c
|
||||
|
@ -558,7 +556,7 @@ S: Maintained
|
|||
F: hw/*/allwinner-h3*
|
||||
F: include/hw/*/allwinner-h3*
|
||||
F: hw/arm/orangepi.c
|
||||
F: docs/system/orangepi.rst
|
||||
F: docs/system/arm/orangepi.rst
|
||||
|
||||
ARM PrimeCell and CMSDK devices
|
||||
M: Peter Maydell <peter.maydell@linaro.org>
|
||||
|
@ -759,8 +757,10 @@ L: qemu-arm@nongnu.org
|
|||
S: Supported
|
||||
F: hw/*/npcm7xx*
|
||||
F: include/hw/*/npcm7xx*
|
||||
F: tests/qtest/npcm7xx*
|
||||
F: pc-bios/npcm7xx_bootrom.bin
|
||||
F: roms/vbootrom
|
||||
F: docs/system/arm/nuvoton.rst
|
||||
|
||||
nSeries
|
||||
M: Andrzej Zaborowski <balrogg@gmail.com>
|
||||
|
@ -800,6 +800,7 @@ F: hw/arm/raspi_platform.h
|
|||
F: hw/*/bcm283*
|
||||
F: include/hw/arm/raspi*
|
||||
F: include/hw/*/bcm283*
|
||||
F: docs/system/arm/raspi.rst
|
||||
|
||||
Real View
|
||||
M: Peter Maydell <peter.maydell@linaro.org>
|
||||
|
@ -854,6 +855,7 @@ R: Leif Lindholm <leif@nuviainc.com>
|
|||
L: qemu-arm@nongnu.org
|
||||
S: Maintained
|
||||
F: hw/arm/sbsa-ref.c
|
||||
F: docs/system/arm/sbsa.rst
|
||||
|
||||
Sharp SL-5500 (Collie) PDA
|
||||
M: Peter Maydell <peter.maydell@linaro.org>
|
||||
|
@ -998,6 +1000,7 @@ F: include/hw/*/*aspeed*
|
|||
F: include/hw/misc/pca9552*.h
|
||||
F: hw/net/ftgmac100.c
|
||||
F: include/hw/net/ftgmac100.h
|
||||
F: docs/system/arm/aspeed.rst
|
||||
|
||||
NRF51
|
||||
M: Joel Stanley <joel@jms.id.au>
|
||||
|
@ -1043,7 +1046,7 @@ F: hw/*/etraxfs_*.c
|
|||
HP-PARISC Machines
|
||||
------------------
|
||||
HP B160L
|
||||
M: Richard Henderson <rth@twiddle.net>
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
R: Helge Deller <deller@gmx.de>
|
||||
S: Odd Fixes
|
||||
F: default-configs/hppa-softmmu.mak
|
||||
|
@ -1124,10 +1127,9 @@ F: hw/display/jazz_led.c
|
|||
F: hw/dma/rc4030.c
|
||||
|
||||
Malta
|
||||
M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>
|
||||
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
|
||||
R: Aurelien Jarno <aurelien@aurel32.net>
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: hw/isa/piix4.c
|
||||
F: hw/acpi/piix4.c
|
||||
F: hw/mips/malta.c
|
||||
|
@ -1137,23 +1139,14 @@ F: tests/acceptance/linux_ssh_mips_malta.py
|
|||
F: tests/acceptance/machine_mips_malta.py
|
||||
|
||||
Mipssim
|
||||
M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>
|
||||
R: Aleksandar Rikalo <aleksandar.rikalo@syrmia.com>
|
||||
S: Odd Fixes
|
||||
S: Orphaned
|
||||
F: hw/mips/mipssim.c
|
||||
F: hw/net/mipsnet.c
|
||||
|
||||
R4000
|
||||
M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>
|
||||
R: Aurelien Jarno <aurelien@aurel32.net>
|
||||
R: Aleksandar Rikalo <aleksandar.rikalo@syrmia.com>
|
||||
S: Obsolete
|
||||
F: hw/mips/r4k.c
|
||||
|
||||
Fuloong 2E
|
||||
M: Huacai Chen <chenhc@lemote.com>
|
||||
M: Huacai Chen <chenhuacai@kernel.org>
|
||||
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
|
||||
M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>
|
||||
R: Jiaxun Yang <jiaxun.yang@flygoat.com>
|
||||
S: Odd Fixes
|
||||
F: hw/mips/fuloong2e.c
|
||||
|
@ -1162,15 +1155,15 @@ F: hw/pci-host/bonito.c
|
|||
F: include/hw/isa/vt82c686.h
|
||||
|
||||
Loongson-3 virtual platforms
|
||||
M: Huacai Chen <chenhc@lemote.com>
|
||||
M: Huacai Chen <chenhuacai@kernel.org>
|
||||
R: Jiaxun Yang <jiaxun.yang@flygoat.com>
|
||||
S: Maintained
|
||||
F: hw/intc/loongson_liointc.c
|
||||
|
||||
Boston
|
||||
M: Paul Burton <pburton@wavecomp.com>
|
||||
M: Paul Burton <paulburton@kernel.org>
|
||||
R: Aleksandar Rikalo <aleksandar.rikalo@syrmia.com>
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: hw/core/loader-fit.c
|
||||
F: hw/mips/boston.c
|
||||
F: hw/pci-host/xilinx-pcie.c
|
||||
|
@ -1187,18 +1180,21 @@ PowerPC Machines
|
|||
----------------
|
||||
405
|
||||
M: David Gibson <david@gibson.dropbear.id.au>
|
||||
M: Greg Kurz <groug@kaod.org>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Odd Fixes
|
||||
F: hw/ppc/ppc405_boards.c
|
||||
|
||||
Bamboo
|
||||
M: David Gibson <david@gibson.dropbear.id.au>
|
||||
M: Greg Kurz <groug@kaod.org>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Odd Fixes
|
||||
F: hw/ppc/ppc440_bamboo.c
|
||||
|
||||
e500
|
||||
M: David Gibson <david@gibson.dropbear.id.au>
|
||||
M: Greg Kurz <groug@kaod.org>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Odd Fixes
|
||||
F: hw/ppc/e500*
|
||||
|
@ -1212,6 +1208,7 @@ F: pc-bios/u-boot.e500
|
|||
|
||||
mpc8544ds
|
||||
M: David Gibson <david@gibson.dropbear.id.au>
|
||||
M: Greg Kurz <groug@kaod.org>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Odd Fixes
|
||||
F: hw/ppc/mpc8544ds.c
|
||||
|
@ -1220,6 +1217,7 @@ F: hw/ppc/mpc8544_guts.c
|
|||
New World (mac99)
|
||||
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
|
||||
R: David Gibson <david@gibson.dropbear.id.au>
|
||||
R: Greg Kurz <groug@kaod.org>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Odd Fixes
|
||||
F: hw/ppc/mac_newworld.c
|
||||
|
@ -1239,6 +1237,7 @@ F: pc-bios/qemu_vga.ndrv
|
|||
Old World (g3beige)
|
||||
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
|
||||
R: David Gibson <david@gibson.dropbear.id.au>
|
||||
R: Greg Kurz <groug@kaod.org>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Odd Fixes
|
||||
F: hw/ppc/mac_oldworld.c
|
||||
|
@ -1252,6 +1251,8 @@ F: pc-bios/qemu_vga.ndrv
|
|||
|
||||
PReP
|
||||
M: Hervé Poussineau <hpoussin@reactos.org>
|
||||
R: David Gibson <david@gibson.dropbear.id.au>
|
||||
R: Greg Kurz <groug@kaod.org>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Maintained
|
||||
F: hw/ppc/prep.c
|
||||
|
@ -1268,6 +1269,7 @@ F: tests/acceptance/ppc_prep_40p.py
|
|||
|
||||
sPAPR
|
||||
M: David Gibson <david@gibson.dropbear.id.au>
|
||||
M: Greg Kurz <groug@kaod.org>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Supported
|
||||
F: hw/*/spapr*
|
||||
|
@ -1285,6 +1287,7 @@ F: tests/qtest/libqos/rtas*
|
|||
PowerNV (Non-Virtualized)
|
||||
M: Cédric Le Goater <clg@kaod.org>
|
||||
M: David Gibson <david@gibson.dropbear.id.au>
|
||||
M: Greg Kurz <groug@kaod.org>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Maintained
|
||||
F: hw/ppc/pnv*
|
||||
|
@ -1304,6 +1307,8 @@ F: hw/ppc/virtex_ml507.c
|
|||
|
||||
sam460ex
|
||||
M: BALATON Zoltan <balaton@eik.bme.hu>
|
||||
R: David Gibson <david@gibson.dropbear.id.au>
|
||||
R: Greg Kurz <groug@kaod.org>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Maintained
|
||||
F: hw/ppc/sam460ex.c
|
||||
|
@ -1334,14 +1339,20 @@ L: qemu-riscv@nongnu.org
|
|||
S: Supported
|
||||
F: hw/riscv/microchip_pfsoc.c
|
||||
F: hw/char/mchp_pfsoc_mmuart.c
|
||||
F: hw/misc/mchp_pfsoc_dmc.c
|
||||
F: hw/misc/mchp_pfsoc_ioscb.c
|
||||
F: hw/misc/mchp_pfsoc_sysreg.c
|
||||
F: include/hw/riscv/microchip_pfsoc.h
|
||||
F: include/hw/char/mchp_pfsoc_mmuart.h
|
||||
F: include/hw/misc/mchp_pfsoc_dmc.h
|
||||
F: include/hw/misc/mchp_pfsoc_ioscb.h
|
||||
F: include/hw/misc/mchp_pfsoc_sysreg.h
|
||||
|
||||
RX Machines
|
||||
-----------
|
||||
rx-gdbsim
|
||||
M: Yoshinori Sato <ysato@users.sourceforge.jp>
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: docs/system/target-rx.rst
|
||||
F: hw/rx/rx-gdbsim.c
|
||||
F: tests/acceptance/machine_rx_gdbsim.py
|
||||
|
@ -1351,7 +1362,7 @@ SH4 Machines
|
|||
R2D
|
||||
M: Yoshinori Sato <ysato@users.sourceforge.jp>
|
||||
R: Magnus Damm <magnus.damm@gmail.com>
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: hw/sh4/r2d.c
|
||||
F: hw/intc/sh_intc.c
|
||||
F: include/hw/sh4/sh_intc.h
|
||||
|
@ -1424,6 +1435,7 @@ F: include/hw/s390x/
|
|||
F: hw/watchdog/wdt_diag288.c
|
||||
F: include/hw/watchdog/wdt_diag288.h
|
||||
F: default-configs/s390x-softmmu.mak
|
||||
F: tests/acceptance/machine_s390_ccw_virtio.py
|
||||
T: git https://github.com/cohuck/qemu.git s390-next
|
||||
T: git https://github.com/borntraeger/qemu.git s390-next
|
||||
L: qemu-s390x@nongnu.org
|
||||
|
@ -1443,6 +1455,7 @@ S390 PCI
|
|||
M: Matthew Rosato <mjrosato@linux.ibm.com>
|
||||
S: Supported
|
||||
F: hw/s390x/s390-pci*
|
||||
F: include/hw/s390x/s390-pci*
|
||||
L: qemu-s390x@nongnu.org
|
||||
|
||||
UniCore32 Machines
|
||||
|
@ -1525,6 +1538,7 @@ Machine core
|
|||
M: Eduardo Habkost <ehabkost@redhat.com>
|
||||
M: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
|
||||
S: Supported
|
||||
F: cpu.c
|
||||
F: hw/core/cpu.c
|
||||
F: hw/core/machine-qmp-cmds.c
|
||||
F: hw/core/machine.c
|
||||
|
@ -1537,7 +1551,7 @@ F: include/hw/boards.h
|
|||
F: include/hw/core/cpu.h
|
||||
F: include/hw/cpu/cluster.h
|
||||
F: include/sysemu/numa.h
|
||||
T: git https://github.com/ehabkost/qemu.git machine-next
|
||||
T: git https://gitlab.com/ehabkost/qemu.git machine-next
|
||||
|
||||
Xtensa Machines
|
||||
---------------
|
||||
|
@ -1559,6 +1573,14 @@ F: hw/net/opencores_eth.c
|
|||
|
||||
Devices
|
||||
-------
|
||||
Xilinx CAN
|
||||
M: Vikram Garhwal <fnu.vikram@xilinx.com>
|
||||
M: Francisco Iglesias <francisco.iglesias@xilinx.com>
|
||||
S: Maintained
|
||||
F: hw/net/can/xlnx-*
|
||||
F: include/hw/net/xlnx-*
|
||||
F: tests/qtest/xlnx-can-test*
|
||||
|
||||
EDU
|
||||
M: Jiri Slaby <jslaby@suse.cz>
|
||||
S: Maintained
|
||||
|
@ -1604,6 +1626,7 @@ L: qemu-arm@nongnu.org
|
|||
S: Maintained
|
||||
F: hw/*/omap*
|
||||
F: include/hw/arm/omap.h
|
||||
F: docs/system/arm/sx1.rst
|
||||
|
||||
IPack
|
||||
M: Alberto Garcia <berto@igalia.com>
|
||||
|
@ -1883,7 +1906,9 @@ M: Klaus Jensen <its@irrelevant.dk>
|
|||
L: qemu-block@nongnu.org
|
||||
S: Supported
|
||||
F: hw/block/nvme*
|
||||
F: include/block/nvme.h
|
||||
F: tests/qtest/nvme-test.c
|
||||
F: docs/specs/nvme.txt
|
||||
T: git git://git.infradead.org/qemu-nvme.git nvme-next
|
||||
|
||||
megasas
|
||||
|
@ -1913,6 +1938,7 @@ Rocker
|
|||
M: Jiri Pirko <jiri@resnulli.us>
|
||||
S: Maintained
|
||||
F: hw/net/rocker/
|
||||
F: qapi/rocker.json
|
||||
F: tests/rocker/
|
||||
F: docs/specs/rocker.txt
|
||||
|
||||
|
@ -1974,6 +2000,12 @@ F: docs/specs/vmgenid.txt
|
|||
F: tests/qtest/vmgenid-test.c
|
||||
F: stubs/vmgenid.c
|
||||
|
||||
LED
|
||||
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
|
||||
S: Maintained
|
||||
F: include/hw/misc/led.h
|
||||
F: hw/misc/led.c
|
||||
|
||||
Unimplemented device
|
||||
M: Peter Maydell <peter.maydell@linaro.org>
|
||||
R: Philippe Mathieu-Daudé <f4bug@amsat.org>
|
||||
|
@ -2055,7 +2087,7 @@ R: Laszlo Ersek <lersek@redhat.com>
|
|||
R: Gerd Hoffmann <kraxel@redhat.com>
|
||||
S: Supported
|
||||
F: docs/specs/fw_cfg.txt
|
||||
F: hw/nvram/fw_cfg.c
|
||||
F: hw/nvram/fw_cfg*.c
|
||||
F: stubs/fw_cfg.c
|
||||
F: include/hw/nvram/fw_cfg.h
|
||||
F: include/standard-headers/linux/qemu_fw_cfg.h
|
||||
|
@ -2064,8 +2096,9 @@ F: tests/qtest/fw_cfg-test.c
|
|||
T: git https://github.com/philmd/qemu.git fw_cfg-next
|
||||
|
||||
XIVE
|
||||
M: David Gibson <david@gibson.dropbear.id.au>
|
||||
M: Cédric Le Goater <clg@kaod.org>
|
||||
R: David Gibson <david@gibson.dropbear.id.au>
|
||||
R: Greg Kurz <groug@kaod.org>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Supported
|
||||
F: hw/*/*xive*
|
||||
|
@ -2075,7 +2108,7 @@ F: docs/*/*xive*
|
|||
Renesas peripherals
|
||||
M: Yoshinori Sato <ysato@users.sourceforge.jp>
|
||||
R: Magnus Damm <magnus.damm@gmail.com>
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: hw/char/renesas_sci.c
|
||||
F: hw/char/sh_serial.c
|
||||
F: hw/timer/renesas_*.c
|
||||
|
@ -2086,7 +2119,7 @@ F: include/hw/timer/renesas_*.h
|
|||
|
||||
Renesas RX peripherals
|
||||
M: Yoshinori Sato <ysato@users.sourceforge.jp>
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: hw/intc/rx_icu.c
|
||||
F: hw/rx/
|
||||
F: include/hw/intc/rx_icu.h
|
||||
|
@ -2109,6 +2142,7 @@ S: Maintained
|
|||
F: audio/
|
||||
F: hw/audio/
|
||||
F: include/hw/audio/
|
||||
F: qapi/audio.json
|
||||
F: tests/qtest/ac97-test.c
|
||||
F: tests/qtest/es1370-test.c
|
||||
F: tests/qtest/intel-hda-test.c
|
||||
|
@ -2235,7 +2269,7 @@ Device Tree
|
|||
M: Alistair Francis <alistair.francis@wdc.com>
|
||||
R: David Gibson <david@gibson.dropbear.id.au>
|
||||
S: Maintained
|
||||
F: device_tree.c
|
||||
F: softmmu/device_tree.c
|
||||
F: include/sysemu/device_tree.h
|
||||
|
||||
Dump
|
||||
|
@ -2281,10 +2315,11 @@ F: include/exec/memop.h
|
|||
F: include/exec/memory.h
|
||||
F: include/exec/ram_addr.h
|
||||
F: include/exec/ramblock.h
|
||||
F: softmmu/dma-helpers.c
|
||||
F: softmmu/ioport.c
|
||||
F: softmmu/memory.c
|
||||
F: softmmu/physmem.c
|
||||
F: include/exec/memory-internal.h
|
||||
F: exec.c
|
||||
F: scripts/coccinelle/memory-region-housekeeping.cocci
|
||||
|
||||
SPICE
|
||||
|
@ -2316,6 +2351,7 @@ M: Paolo Bonzini <pbonzini@redhat.com>
|
|||
S: Maintained
|
||||
F: include/qemu/main-loop.h
|
||||
F: include/sysemu/runstate.h
|
||||
F: include/sysemu/runstate-action.h
|
||||
F: util/main-loop.c
|
||||
F: util/qemu-timer.c
|
||||
F: softmmu/vl.c
|
||||
|
@ -2324,6 +2360,7 @@ F: softmmu/cpus.c
|
|||
F: softmmu/cpu-throttle.c
|
||||
F: softmmu/cpu-timers.c
|
||||
F: softmmu/icount.c
|
||||
F: softmmu/runstate-action.c
|
||||
F: qapi/run-state.json
|
||||
|
||||
Read, Copy, Update (RCU)
|
||||
|
@ -2371,7 +2408,7 @@ M: Igor Mammedov <imammedo@redhat.com>
|
|||
S: Maintained
|
||||
F: backends/hostmem*.c
|
||||
F: include/sysemu/hostmem.h
|
||||
T: git https://github.com/ehabkost/qemu.git machine-next
|
||||
T: git https://gitlab.com/ehabkost/qemu.git machine-next
|
||||
|
||||
Cryptodev Backends
|
||||
M: Gonglei <arei.gonglei@huawei.com>
|
||||
|
@ -2379,11 +2416,18 @@ S: Maintained
|
|||
F: include/sysemu/cryptodev*.h
|
||||
F: backends/cryptodev*.c
|
||||
|
||||
Python library
|
||||
M: John Snow <jsnow@redhat.com>
|
||||
M: Cleber Rosa <crosa@redhat.com>
|
||||
R: Eduardo Habkost <ehabkost@redhat.com>
|
||||
S: Maintained
|
||||
F: python/
|
||||
T: git https://gitlab.com/jsnow/qemu.git python
|
||||
|
||||
Python scripts
|
||||
M: Eduardo Habkost <ehabkost@redhat.com>
|
||||
M: Cleber Rosa <crosa@redhat.com>
|
||||
S: Odd fixes
|
||||
F: python/qemu/*py
|
||||
F: scripts/*.py
|
||||
F: tests/*.py
|
||||
|
||||
|
@ -2394,7 +2438,7 @@ F: scripts/simplebench/
|
|||
|
||||
QAPI
|
||||
M: Markus Armbruster <armbru@redhat.com>
|
||||
M: Michael Roth <mdroth@linux.vnet.ibm.com>
|
||||
M: Michael Roth <michael.roth@amd.com>
|
||||
S: Supported
|
||||
F: qapi/
|
||||
X: qapi/*.json
|
||||
|
@ -2438,7 +2482,7 @@ F: tests/data/qobject/qdict.txt
|
|||
T: git https://repo.or.cz/qemu/armbru.git qapi-next
|
||||
|
||||
QEMU Guest Agent
|
||||
M: Michael Roth <mdroth@linux.vnet.ibm.com>
|
||||
M: Michael Roth <michael.roth@amd.com>
|
||||
S: Maintained
|
||||
F: qga/
|
||||
F: docs/interop/qemu-ga.rst
|
||||
|
@ -2461,7 +2505,8 @@ F: include/monitor/qdev.h
|
|||
F: include/qom/
|
||||
F: qapi/qom.json
|
||||
F: qapi/qdev.json
|
||||
F: qdev-monitor.c
|
||||
F: scripts/coccinelle/qom-parent-type.cocci
|
||||
F: softmmu/qdev-monitor.c
|
||||
F: qom/
|
||||
F: tests/check-qom-interface.c
|
||||
F: tests/check-qom-proplist.c
|
||||
|
@ -2479,7 +2524,9 @@ F: monitor/monitor-internal.h
|
|||
F: monitor/qmp*
|
||||
F: monitor/misc.c
|
||||
F: monitor/monitor.c
|
||||
F: qapi/control.json
|
||||
F: qapi/error.json
|
||||
F: qapi/introspect.json
|
||||
F: docs/devel/*qmp-*
|
||||
F: docs/interop/*qmp-*
|
||||
F: scripts/qmp/
|
||||
|
@ -2495,7 +2542,7 @@ S: Maintained
|
|||
F: softmmu/qtest.c
|
||||
F: accel/qtest/
|
||||
F: tests/qtest/
|
||||
X: tests/qtest/bios-tables-test-allowed-diff.h
|
||||
X: tests/qtest/bios-tables-test*
|
||||
|
||||
Device Fuzzing
|
||||
M: Alexander Bulekov <alxndr@bu.edu>
|
||||
|
@ -2506,7 +2553,7 @@ R: Thomas Huth <thuth@redhat.com>
|
|||
S: Maintained
|
||||
F: tests/qtest/fuzz/
|
||||
F: scripts/oss-fuzz/
|
||||
F: docs/devel/fuzzing.txt
|
||||
F: docs/devel/fuzzing.rst
|
||||
|
||||
Register API
|
||||
M: Alistair Francis <alistair@alistair23.me>
|
||||
|
@ -2540,6 +2587,7 @@ S: Maintained
|
|||
F: trace/
|
||||
F: trace-events
|
||||
F: docs/qemu-option-trace.rst.inc
|
||||
F: qapi/trace.json
|
||||
F: scripts/tracetool.py
|
||||
F: scripts/tracetool/
|
||||
F: scripts/qemu-trace-stap*
|
||||
|
@ -2591,7 +2639,7 @@ F: docs/interop/dbus-vmstate.rst
|
|||
Seccomp
|
||||
M: Eduardo Otubo <otubo@redhat.com>
|
||||
S: Supported
|
||||
F: qemu-seccomp.c
|
||||
F: softmmu/qemu-seccomp.c
|
||||
F: include/sysemu/seccomp.h
|
||||
|
||||
Cryptography
|
||||
|
@ -2599,6 +2647,7 @@ M: Daniel P. Berrange <berrange@redhat.com>
|
|||
S: Maintained
|
||||
F: crypto/
|
||||
F: include/crypto/
|
||||
F: qapi/crypto.json
|
||||
F: tests/test-crypto-*
|
||||
F: tests/benchmark-crypto-*
|
||||
F: tests/crypto-tls-*
|
||||
|
@ -2786,7 +2835,7 @@ F: scripts/gensyscalls.sh
|
|||
Tiny Code Generator (TCG)
|
||||
-------------------------
|
||||
Common TCG code
|
||||
M: Richard Henderson <rth@twiddle.net>
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
S: Maintained
|
||||
F: tcg/
|
||||
F: include/tcg/
|
||||
|
@ -2815,22 +2864,22 @@ F: tcg/arm/
|
|||
F: disas/arm.c
|
||||
|
||||
i386 TCG target
|
||||
M: Richard Henderson <rth@twiddle.net>
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
S: Maintained
|
||||
F: tcg/i386/
|
||||
F: disas/i386.c
|
||||
|
||||
MIPS TCG target
|
||||
M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>
|
||||
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
|
||||
R: Aurelien Jarno <aurelien@aurel32.net>
|
||||
R: Huacai Chen <chenhc@lemote.com>
|
||||
R: Huacai Chen <chenhuacai@kernel.org>
|
||||
R: Jiaxun Yang <jiaxun.yang@flygoat.com>
|
||||
R: Aleksandar Rikalo <aleksandar.rikalo@syrmia.com>
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: tcg/mips/
|
||||
|
||||
PPC TCG target
|
||||
M: Richard Henderson <rth@twiddle.net>
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
S: Odd Fixes
|
||||
F: tcg/ppc/
|
||||
F: disas/ppc.c
|
||||
|
@ -2844,7 +2893,7 @@ F: tcg/riscv/
|
|||
F: disas/riscv.c
|
||||
|
||||
S390 TCG target
|
||||
M: Richard Henderson <rth@twiddle.net>
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
S: Maintained
|
||||
F: tcg/s390/
|
||||
F: disas/s390.c
|
||||
|
@ -2879,7 +2928,6 @@ F: block/rbd.c
|
|||
Sheepdog
|
||||
M: Liu Yuan <namei.unix@gmail.com>
|
||||
L: qemu-block@nongnu.org
|
||||
L: sheepdog@lists.wpkg.org
|
||||
S: Odd Fixes
|
||||
F: block/sheepdog.c
|
||||
|
||||
|
@ -2952,12 +3000,13 @@ R: Fam Zheng <fam@euphon.net>
|
|||
L: qemu-block@nongnu.org
|
||||
S: Supported
|
||||
F: block/nvme*
|
||||
F: include/block/nvme.h
|
||||
T: git https://github.com/stefanha/qemu.git block
|
||||
|
||||
Bootdevice
|
||||
M: Gonglei <arei.gonglei@huawei.com>
|
||||
S: Maintained
|
||||
F: bootdevice.c
|
||||
F: softmmu/bootdevice.c
|
||||
|
||||
Quorum
|
||||
M: Alberto Garcia <berto@igalia.com>
|
||||
|
@ -3068,6 +3117,21 @@ L: qemu-block@nongnu.org
|
|||
S: Supported
|
||||
F: tests/image-fuzzer/
|
||||
|
||||
Vhost-user block device backend server
|
||||
M: Coiby Xu <Coiby.Xu@gmail.com>
|
||||
S: Maintained
|
||||
F: block/export/vhost-user-blk-server.c
|
||||
F: block/export/vhost-user-blk-server.h
|
||||
F: include/qemu/vhost-user-server.h
|
||||
F: tests/qtest/libqos/vhost-user-blk.c
|
||||
F: util/vhost-user-server.c
|
||||
|
||||
FUSE block device exports
|
||||
M: Max Reitz <mreitz@redhat.com>
|
||||
L: qemu-block@nongnu.org
|
||||
S: Supported
|
||||
F: block/export/fuse.c
|
||||
|
||||
Replication
|
||||
M: Wen Congyang <wencongyang2@huawei.com>
|
||||
M: Xie Changlong <xiechanglong.d@gmail.com>
|
||||
|
@ -3126,6 +3190,8 @@ R: Wainer dos Santos Moschetta <wainersm@redhat.com>
|
|||
S: Maintained
|
||||
F: .gitlab-ci.yml
|
||||
F: .gitlab-ci.d/crossbuilds.yml
|
||||
F: .gitlab-ci.d/*py
|
||||
F: scripts/ci/gitlab-pipeline-status
|
||||
|
||||
Guest Test Compilation Support
|
||||
M: Alex Bennée <alex.bennee@linaro.org>
|
||||
|
@ -3167,7 +3233,7 @@ S: Odd Fixes
|
|||
F: scripts/git-submodule.sh
|
||||
|
||||
UI translations
|
||||
M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>
|
||||
S: Orphaned
|
||||
F: po/*.po
|
||||
|
||||
Sphinx documentation configuration and build machinery
|
||||
|
|
169
Makefile
169
Makefile
|
@ -14,6 +14,8 @@ SRC_PATH=.
|
|||
# we have explicit rules for everything
|
||||
MAKEFLAGS += -rR
|
||||
|
||||
SHELL = /usr/bin/env bash -o pipefail
|
||||
|
||||
# Usage: $(call quiet-command,command and args,"NAME","args to print")
|
||||
# This will run "command and args", and either:
|
||||
# if V=1 just print the whole command and args
|
||||
|
@ -28,13 +30,21 @@ UNCHECKED_GOALS := %clean TAGS cscope ctags dist \
|
|||
help check-help print-% \
|
||||
docker docker-% vm-help vm-test vm-build-%
|
||||
|
||||
all:
|
||||
.PHONY: all clean distclean recurse-all dist msi FORCE
|
||||
|
||||
# Don't try to regenerate Makefile or configure
|
||||
# We don't generate any of them
|
||||
Makefile: ;
|
||||
configure: ;
|
||||
|
||||
# All following code might depend on configuration variables
|
||||
ifneq ($(wildcard config-host.mak),)
|
||||
# Put the all: rule here so that config-host.mak can contain dependencies.
|
||||
all:
|
||||
include config-host.mak
|
||||
|
||||
git-submodule-update:
|
||||
.git-submodule-status: git-submodule-update config-host.mak
|
||||
Makefile: .git-submodule-status
|
||||
|
||||
.PHONY: git-submodule-update
|
||||
|
||||
|
@ -62,28 +72,7 @@ git-submodule-update:
|
|||
endif
|
||||
endif
|
||||
|
||||
export NINJA=./ninjatool
|
||||
|
||||
# Running meson regenerates both build.ninja and ninjatool, and that is
|
||||
# enough to prime the rest of the build.
|
||||
ninjatool: build.ninja
|
||||
|
||||
Makefile.ninja: build.ninja ninjatool
|
||||
./ninjatool -t ninja2make --omit clean dist uninstall cscope TAGS ctags < $< > $@
|
||||
-include Makefile.ninja
|
||||
|
||||
${ninja-targets-c_COMPILER} ${ninja-targets-cpp_COMPILER}: .var.command += -MP
|
||||
|
||||
# If MESON is empty, the rule will be re-evaluated after Makefiles are
|
||||
# reread (and MESON won't be empty anymore).
|
||||
ifneq ($(MESON),)
|
||||
Makefile.mtest: build.ninja scripts/mtest2make.py
|
||||
$(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@
|
||||
-include Makefile.mtest
|
||||
endif
|
||||
|
||||
Makefile: .git-submodule-status
|
||||
.git-submodule-status: git-submodule-update config-host.mak
|
||||
# 0. ensure the build tree is okay
|
||||
|
||||
# Check that we're not trying to do an out-of-tree build from
|
||||
# a tree that's been used for an in-tree build.
|
||||
|
@ -95,14 +84,95 @@ seems to have been used for an in-tree build. You can fix this by running \
|
|||
endif
|
||||
endif
|
||||
|
||||
# force a rerun of configure if config-host.mak is too old or corrupted
|
||||
ifeq ($(MESON),)
|
||||
.PHONY: config-host.mak
|
||||
x := $(shell rm -rf meson-private meson-info meson-logs)
|
||||
endif
|
||||
ifeq ($(NINJA),)
|
||||
.PHONY: config-host.mak
|
||||
x := $(shell rm -rf meson-private meson-info meson-logs)
|
||||
else
|
||||
export NINJA
|
||||
endif
|
||||
ifeq ($(wildcard build.ninja),)
|
||||
.PHONY: config-host.mak
|
||||
x := $(shell rm -rf meson-private meson-info meson-logs)
|
||||
endif
|
||||
ifeq ($(origin prefix),file)
|
||||
.PHONY: config-host.mak
|
||||
x := $(shell rm -rf meson-private meson-info meson-logs)
|
||||
endif
|
||||
|
||||
# 1. ensure config-host.mak is up-to-date
|
||||
config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/pc-bios $(SRC_PATH)/VERSION
|
||||
@echo $@ is out-of-date, running configure
|
||||
@echo config-host.mak is out-of-date, running configure
|
||||
@if test -f meson-private/coredata.dat; then \
|
||||
./config.status --skip-meson; \
|
||||
else \
|
||||
./config.status; \
|
||||
./config.status && touch build.ninja.stamp; \
|
||||
fi
|
||||
|
||||
# 2. meson.stamp exists if meson has run at least once (so ninja reconfigure
|
||||
# works), but otherwise never needs to be updated
|
||||
meson-private/coredata.dat: meson.stamp
|
||||
meson.stamp: config-host.mak
|
||||
@touch meson.stamp
|
||||
|
||||
# 3. ensure generated build files are up-to-date
|
||||
|
||||
ifneq ($(NINJA),)
|
||||
Makefile.ninja: build.ninja
|
||||
$(quiet-@){ \
|
||||
echo 'ninja-targets = \'; \
|
||||
$(NINJA) -t targets all | sed 's/:.*//; $$!s/$$/ \\/'; \
|
||||
echo 'build-files = \'; \
|
||||
$(NINJA) -t query build.ninja | sed -n '1,/^ input:/d; /^ outputs:/q; s/$$/ \\/p'; \
|
||||
} > $@.tmp && mv $@.tmp $@
|
||||
-include Makefile.ninja
|
||||
|
||||
# A separate rule is needed for Makefile dependencies to avoid -n
|
||||
build.ninja: build.ninja.stamp
|
||||
build.ninja.stamp: meson.stamp $(build-files)
|
||||
$(NINJA) $(if $V,-v,) build.ninja && touch $@
|
||||
endif
|
||||
|
||||
ifneq ($(MESON),)
|
||||
Makefile.mtest: build.ninja scripts/mtest2make.py
|
||||
$(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@
|
||||
-include Makefile.mtest
|
||||
endif
|
||||
|
||||
# 4. Rules to bridge to other makefiles
|
||||
|
||||
ifneq ($(NINJA),)
|
||||
MAKE.n = $(findstring n,$(firstword $(MAKEFLAGS)))
|
||||
MAKE.k = $(findstring k,$(firstword $(MAKEFLAGS)))
|
||||
MAKE.q = $(findstring q,$(firstword $(MAKEFLAGS)))
|
||||
MAKE.nq = $(if $(word 2, $(MAKE.n) $(MAKE.q)),nq)
|
||||
NINJAFLAGS = $(if $V,-v) $(if $(MAKE.n), -n) $(if $(MAKE.k), -k0) \
|
||||
$(filter-out -j, $(lastword -j1 $(filter -l% -j%, $(MAKEFLAGS)))) \
|
||||
|
||||
ninja-cmd-goals = $(or $(MAKECMDGOALS), all)
|
||||
ninja-cmd-goals += $(foreach t, $(.tests), $(.test.deps.$t))
|
||||
|
||||
makefile-targets := build.ninja ctags TAGS cscope dist clean uninstall
|
||||
# "ninja -t targets" also lists all prerequisites. If build system
|
||||
# files are marked as PHONY, however, Make will always try to execute
|
||||
# "ninja build.ninja".
|
||||
ninja-targets := $(filter-out $(build-files) $(makefile-targets), $(ninja-targets))
|
||||
.PHONY: $(ninja-targets) run-ninja
|
||||
$(ninja-targets): run-ninja
|
||||
|
||||
# Use "| cat" to give Ninja a more "make-y" output. Use "+" to bypass the
|
||||
# --output-sync line.
|
||||
run-ninja: config-host.mak
|
||||
ifneq ($(filter $(ninja-targets), $(ninja-cmd-goals)),)
|
||||
+$(quiet-@)$(if $(MAKE.nq),@:, $(NINJA) \
|
||||
$(NINJAFLAGS) $(sort $(filter $(ninja-targets), $(ninja-cmd-goals))) | cat)
|
||||
endif
|
||||
endif
|
||||
|
||||
# Force configure to re-run if the API symbols are updated
|
||||
ifeq ($(CONFIG_PLUGIN),y)
|
||||
config-host.mak: $(SRC_PATH)/plugins/qemu-plugins.symbols
|
||||
|
@ -112,37 +182,21 @@ plugins:
|
|||
$(call quiet-command,\
|
||||
$(MAKE) $(SUBDIR_MAKEFLAGS) -C contrib/plugins V="$(V)", \
|
||||
"BUILD", "example plugins")
|
||||
endif
|
||||
endif # $(CONFIG_PLUGIN)
|
||||
|
||||
else
|
||||
else # config-host.mak does not exist
|
||||
config-host.mak:
|
||||
ifneq ($(filter-out $(UNCHECKED_GOALS),$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail))
|
||||
@echo "Please call configure before running make!"
|
||||
@exit 1
|
||||
endif
|
||||
endif
|
||||
|
||||
# Only needed in case Makefile.ninja does not exist.
|
||||
.PHONY: ninja-clean ninja-distclean clean-ctlist
|
||||
clean-ctlist:
|
||||
ninja-clean::
|
||||
ninja-distclean::
|
||||
build.ninja: config-host.mak
|
||||
|
||||
# Don't try to regenerate Makefile or configure
|
||||
# We don't generate any of them
|
||||
Makefile: ;
|
||||
configure: ;
|
||||
|
||||
.PHONY: all clean distclean install \
|
||||
recurse-all dist msi FORCE
|
||||
endif # config-host.mak does not exist
|
||||
|
||||
SUBDIR_MAKEFLAGS=$(if $(V),,--no-print-directory --quiet)
|
||||
|
||||
include $(SRC_PATH)/tests/Makefile.include
|
||||
|
||||
all: recurse-all
|
||||
Makefile:
|
||||
|
||||
ROM_DIRS = $(addprefix pc-bios/, $(ROMS))
|
||||
ROM_DIRS_RULES=$(foreach t, all clean, $(addsuffix /$(t), $(ROM_DIRS)))
|
||||
|
@ -157,8 +211,9 @@ recurse-clean: $(addsuffix /clean, $(ROM_DIRS))
|
|||
|
||||
######################################################################
|
||||
|
||||
clean: recurse-clean ninja-clean clean-ctlist
|
||||
if test -f ninjatool; then ./ninjatool $(if $(V),-v,) -t clean; fi
|
||||
clean: recurse-clean
|
||||
-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean || :
|
||||
-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) clean-ctlist || :
|
||||
# avoid old build problems by removing potentially incorrect old files
|
||||
rm -f config.mak op-i386.h opc-i386.h gen-op-i386.h op-arm.h opc-arm.h gen-op-arm.h
|
||||
find . \( -name '*.so' -o -name '*.dll' -o -name '*.[oda]' \) -type f \
|
||||
|
@ -175,8 +230,8 @@ dist: qemu-$(VERSION).tar.bz2
|
|||
qemu-%.tar.bz2:
|
||||
$(SRC_PATH)/scripts/make-release "$(SRC_PATH)" "$(patsubst qemu-%.tar.bz2,%,$@)"
|
||||
|
||||
distclean: clean ninja-distclean
|
||||
-test -f ninjatool && ./ninjatool $(if $(V),-v,) -t clean -g
|
||||
distclean: clean
|
||||
-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean -g || :
|
||||
rm -f config-host.mak config-host.h*
|
||||
rm -f tests/tcg/config-*.mak
|
||||
rm -f config-all-disas.mak config.status
|
||||
|
@ -185,28 +240,28 @@ distclean: clean ninja-distclean
|
|||
rm -f qemu-plugins-ld.symbols qemu-plugins-ld64.symbols
|
||||
rm -f *-config-target.h *-config-devices.mak *-config-devices.h
|
||||
rm -rf meson-private meson-logs meson-info compile_commands.json
|
||||
rm -f Makefile.ninja ninjatool ninjatool.stamp Makefile.mtest
|
||||
rm -f Makefile.ninja Makefile.mtest build.ninja.stamp meson.stamp
|
||||
rm -f config.log
|
||||
rm -f linux-headers/asm
|
||||
rm -Rf .sdk
|
||||
|
||||
find-src-path = find "$(SRC_PATH)/" -path "$(SRC_PATH)/meson" -prune -o -name "*.[chsS]"
|
||||
find-src-path = find "$(SRC_PATH)/" -path "$(SRC_PATH)/meson" -prune -o \( -name "*.[chsS]" -o -name "*.[ch].inc" \)
|
||||
|
||||
.PHONY: ctags
|
||||
ctags:
|
||||
rm -f tags
|
||||
$(find-src-path) -exec ctags --append {} +
|
||||
rm -f "$(SRC_PATH)/"tags
|
||||
$(find-src-path) -exec ctags -f "$(SRC_PATH)/"tags --append {} +
|
||||
|
||||
.PHONY: TAGS
|
||||
TAGS:
|
||||
rm -f TAGS
|
||||
$(find-src-path) -exec etags --append {} +
|
||||
rm -f "$(SRC_PATH)/"TAGS
|
||||
$(find-src-path) -exec etags -f "$(SRC_PATH)/"TAGS --append {} +
|
||||
|
||||
.PHONY: cscope
|
||||
cscope:
|
||||
rm -f "$(SRC_PATH)"/cscope.*
|
||||
$(find-src-path) -print | sed -e 's,^\./,,' > "$(SRC_PATH)/cscope.files"
|
||||
cscope -b -i"$(SRC_PATH)/cscope.files"
|
||||
cscope -b -i"$(SRC_PATH)/cscope.files" -f"$(SRC_PATH)"/cscope.out
|
||||
|
||||
# Needed by "meson install"
|
||||
export DESTDIR
|
||||
|
@ -215,7 +270,7 @@ include $(SRC_PATH)/tests/docker/Makefile.include
|
|||
include $(SRC_PATH)/tests/vm/Makefile.include
|
||||
|
||||
print-help-run = printf " %-30s - %s\\n" "$1" "$2"
|
||||
print-help = $(quiet-@)$(call print-help-run,$1,$2)
|
||||
print-help = @$(call print-help-run,$1,$2)
|
||||
|
||||
.PHONY: help
|
||||
help:
|
||||
|
|
|
@ -134,6 +134,14 @@ For additional information on bug reporting consult:
|
|||
* `<https://qemu.org/Contribute/ReportABug>`_
|
||||
|
||||
|
||||
ChangeLog
|
||||
=========
|
||||
|
||||
For version history and release notes, please visit
|
||||
`<https://wiki.qemu.org/ChangeLog/>`_ or look at the git history for
|
||||
more detailed information.
|
||||
|
||||
|
||||
Contact
|
||||
=======
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* QTest accelerator code
|
||||
* Dummy cpu thread code
|
||||
*
|
||||
* Copyright IBM, Corp. 2011
|
||||
*
|
||||
|
@ -13,26 +13,13 @@
|
|||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/rcu.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/module.h"
|
||||
#include "qemu/option.h"
|
||||
#include "qemu/config-file.h"
|
||||
#include "sysemu/accel.h"
|
||||
#include "sysemu/qtest.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "sysemu/cpu-timers.h"
|
||||
#include "qemu/guest-random.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "hw/core/cpu.h"
|
||||
|
||||
#include "qtest-cpus.h"
|
||||
|
||||
static void *qtest_cpu_thread_fn(void *arg)
|
||||
static void *dummy_cpu_thread_fn(void *arg)
|
||||
{
|
||||
#ifdef _WIN32
|
||||
error_report("qtest is not supported under Windows");
|
||||
exit(1);
|
||||
#else
|
||||
CPUState *cpu = arg;
|
||||
sigset_t waitset;
|
||||
int r;
|
||||
|
@ -69,10 +56,9 @@ static void *qtest_cpu_thread_fn(void *arg)
|
|||
qemu_mutex_unlock_iothread();
|
||||
rcu_unregister_thread();
|
||||
return NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void qtest_start_vcpu_thread(CPUState *cpu)
|
||||
void dummy_start_vcpu_thread(CPUState *cpu)
|
||||
{
|
||||
char thread_name[VCPU_THREAD_NAME_SIZE];
|
||||
|
||||
|
@ -81,11 +67,6 @@ static void qtest_start_vcpu_thread(CPUState *cpu)
|
|||
qemu_cond_init(cpu->halt_cond);
|
||||
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
|
||||
cpu->cpu_index);
|
||||
qemu_thread_create(cpu->thread, thread_name, qtest_cpu_thread_fn, cpu,
|
||||
qemu_thread_create(cpu->thread, thread_name, dummy_cpu_thread_fn, cpu,
|
||||
QEMU_THREAD_JOINABLE);
|
||||
}
|
||||
|
||||
const CpusAccel qtest_cpus = {
|
||||
.create_vcpu_thread = qtest_start_vcpu_thread,
|
||||
.get_virtual_clock = qtest_get_virtual_clock,
|
||||
};
|
|
@ -745,7 +745,7 @@ static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
|
|||
assert(bmap_start % BITS_PER_LONG == 0);
|
||||
/* We should never do log_clear before log_sync */
|
||||
assert(mem->dirty_bmap);
|
||||
if (start_delta) {
|
||||
if (start_delta || bmap_npages - size / psize) {
|
||||
/* Slow path - we need to manipulate a temp bitmap */
|
||||
bmap_clear = bitmap_new(bmap_npages);
|
||||
bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
|
||||
|
@ -758,7 +758,10 @@ static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
|
|||
bitmap_clear(bmap_clear, 0, start_delta);
|
||||
d.dirty_bitmap = bmap_clear;
|
||||
} else {
|
||||
/* Fast path - start address aligns well with BITS_PER_LONG */
|
||||
/*
|
||||
* Fast path - both start and size align well with BITS_PER_LONG
|
||||
* (or the end of memory slot)
|
||||
*/
|
||||
d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
|
||||
}
|
||||
|
||||
|
@ -2013,7 +2016,6 @@ static int kvm_init(MachineState *ms)
|
|||
const KVMCapabilityInfo *missing_cap;
|
||||
int ret;
|
||||
int type = 0;
|
||||
const char *kvm_type;
|
||||
uint64_t dirty_log_manual_caps;
|
||||
|
||||
s = KVM_STATE(ms->accelerator);
|
||||
|
@ -2069,13 +2071,11 @@ static int kvm_init(MachineState *ms)
|
|||
}
|
||||
s->as = g_new0(struct KVMAs, s->nr_as);
|
||||
|
||||
kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
|
||||
if (mc->kvm_type) {
|
||||
if (object_property_find(OBJECT(current_machine), "kvm-type")) {
|
||||
g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine),
|
||||
"kvm-type",
|
||||
&error_abort);
|
||||
type = mc->kvm_type(ms, kvm_type);
|
||||
} else if (kvm_type) {
|
||||
ret = -EINVAL;
|
||||
fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
|
||||
goto err;
|
||||
}
|
||||
|
||||
do {
|
||||
|
@ -2239,8 +2239,10 @@ static int kvm_init(MachineState *ms)
|
|||
|
||||
kvm_memory_listener_register(s, &s->memory_listener,
|
||||
&address_space_memory, 0);
|
||||
memory_listener_register(&kvm_io_listener,
|
||||
&address_space_io);
|
||||
if (kvm_eventfds_allowed) {
|
||||
memory_listener_register(&kvm_io_listener,
|
||||
&address_space_io);
|
||||
}
|
||||
memory_listener_register(&kvm_coalesced_pio_listener,
|
||||
&address_space_io);
|
||||
|
||||
|
|
|
@ -5,3 +5,11 @@ subdir('kvm')
|
|||
subdir('tcg')
|
||||
subdir('xen')
|
||||
subdir('stubs')
|
||||
|
||||
dummy_ss = ss.source_set()
|
||||
dummy_ss.add(files(
|
||||
'dummy-cpus.c',
|
||||
))
|
||||
|
||||
specific_ss.add_all(when: ['CONFIG_SOFTMMU', 'CONFIG_POSIX'], if_true: dummy_ss)
|
||||
specific_ss.add_all(when: ['CONFIG_XEN'], if_true: dummy_ss)
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
qtest_ss = ss.source_set()
|
||||
qtest_ss.add(files(
|
||||
'qtest.c',
|
||||
'qtest-cpus.c',
|
||||
))
|
||||
|
||||
specific_ss.add_all(when: ['CONFIG_SOFTMMU', 'CONFIG_POSIX'], if_true: qtest_ss)
|
||||
|
|
|
@ -25,7 +25,10 @@
|
|||
#include "qemu/main-loop.h"
|
||||
#include "hw/core/cpu.h"
|
||||
|
||||
#include "qtest-cpus.h"
|
||||
const CpusAccel qtest_cpus = {
|
||||
.create_vcpu_thread = dummy_start_vcpu_thread,
|
||||
.get_virtual_clock = qtest_get_virtual_clock,
|
||||
};
|
||||
|
||||
static int qtest_init_accel(MachineState *ms)
|
||||
{
|
||||
|
|
|
@ -14,7 +14,6 @@
|
|||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "cpu.h"
|
||||
#include "sysemu/hax.h"
|
||||
|
||||
int hax_sync_vcpus(void)
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
|
||||
#include "qemu/osdep.h"
|
||||
#include "sysemu/xen.h"
|
||||
#include "qapi/qapi-commands-misc.h"
|
||||
#include "qapi/qapi-commands-migration.h"
|
||||
|
||||
bool xen_allowed;
|
||||
|
||||
|
|
|
@ -236,9 +236,26 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
|
|||
}
|
||||
#endif
|
||||
|
||||
void cpu_exec_step_atomic(CPUState *cpu)
|
||||
static void cpu_exec_enter(CPUState *cpu)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (cc->cpu_exec_enter) {
|
||||
cc->cpu_exec_enter(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
static void cpu_exec_exit(CPUState *cpu)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (cc->cpu_exec_exit) {
|
||||
cc->cpu_exec_exit(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
void cpu_exec_step_atomic(CPUState *cpu)
|
||||
{
|
||||
TranslationBlock *tb;
|
||||
target_ulong cs_base, pc;
|
||||
uint32_t flags;
|
||||
|
@ -257,11 +274,11 @@ void cpu_exec_step_atomic(CPUState *cpu)
|
|||
|
||||
/* Since we got here, we know that parallel_cpus must be true. */
|
||||
parallel_cpus = false;
|
||||
cc->cpu_exec_enter(cpu);
|
||||
cpu_exec_enter(cpu);
|
||||
/* execute the generated code */
|
||||
trace_exec_tb(tb, pc);
|
||||
cpu_tb_exec(cpu, tb);
|
||||
cc->cpu_exec_exit(cpu);
|
||||
cpu_exec_exit(cpu);
|
||||
} else {
|
||||
/*
|
||||
* The mmap_lock is dropped by tb_gen_code if it runs out of
|
||||
|
@ -465,7 +482,9 @@ static inline void cpu_handle_debug_exception(CPUState *cpu)
|
|||
}
|
||||
}
|
||||
|
||||
cc->debug_excp_handler(cpu);
|
||||
if (cc->debug_excp_handler) {
|
||||
cc->debug_excp_handler(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
|
||||
|
@ -606,7 +625,8 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
|||
True when it is, and we should restart on a new TB,
|
||||
and via longjmp via cpu_loop_exit. */
|
||||
else {
|
||||
if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
|
||||
if (cc->cpu_exec_interrupt &&
|
||||
cc->cpu_exec_interrupt(cpu, interrupt_request)) {
|
||||
if (need_replay_interrupt(interrupt_request)) {
|
||||
replay_interrupt();
|
||||
}
|
||||
|
@ -685,7 +705,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
|
|||
insns_left = MIN(0xffff, cpu->icount_budget);
|
||||
cpu_neg(cpu)->icount_decr.u16.low = insns_left;
|
||||
cpu->icount_extra = cpu->icount_budget - insns_left;
|
||||
if (!cpu->icount_extra) {
|
||||
if (!cpu->icount_extra && insns_left < tb->icount) {
|
||||
/* Execute any remaining instructions, then let the main loop
|
||||
* handle the next event.
|
||||
*/
|
||||
|
@ -713,7 +733,7 @@ int cpu_exec(CPUState *cpu)
|
|||
|
||||
rcu_read_lock();
|
||||
|
||||
cc->cpu_exec_enter(cpu);
|
||||
cpu_exec_enter(cpu);
|
||||
|
||||
/* Calculate difference between guest clock and host clock.
|
||||
* This delay includes the delay of the last cycle, so
|
||||
|
@ -724,7 +744,7 @@ int cpu_exec(CPUState *cpu)
|
|||
|
||||
/* prepare setjmp context for exception handling */
|
||||
if (sigsetjmp(cpu->jmp_env, 0) != 0) {
|
||||
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
|
||||
#if defined(__clang__)
|
||||
/* Some compilers wrongly smash all local variables after
|
||||
* siglongjmp. There were bug reports for gcc 4.5.0 and clang.
|
||||
* Reload essential local variables here for those compilers.
|
||||
|
@ -775,7 +795,7 @@ int cpu_exec(CPUState *cpu)
|
|||
}
|
||||
}
|
||||
|
||||
cc->cpu_exec_exit(cpu);
|
||||
cpu_exec_exit(cpu);
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
|
|
|
@ -409,12 +409,21 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu)
|
|||
tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
|
||||
}
|
||||
|
||||
static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
|
||||
target_ulong page, target_ulong mask)
|
||||
{
|
||||
page &= mask;
|
||||
mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
|
||||
|
||||
return (page == (tlb_entry->addr_read & mask) ||
|
||||
page == (tlb_addr_write(tlb_entry) & mask) ||
|
||||
page == (tlb_entry->addr_code & mask));
|
||||
}
|
||||
|
||||
static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
|
||||
target_ulong page)
|
||||
{
|
||||
return tlb_hit_page(tlb_entry->addr_read, page) ||
|
||||
tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
|
||||
tlb_hit_page(tlb_entry->addr_code, page);
|
||||
return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -427,31 +436,45 @@ static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
|
|||
}
|
||||
|
||||
/* Called with tlb_c.lock held */
|
||||
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
|
||||
target_ulong page)
|
||||
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
|
||||
target_ulong page,
|
||||
target_ulong mask)
|
||||
{
|
||||
if (tlb_hit_page_anyprot(tlb_entry, page)) {
|
||||
if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
|
||||
memset(tlb_entry, -1, sizeof(*tlb_entry));
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
|
||||
target_ulong page)
|
||||
{
|
||||
return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
|
||||
}
|
||||
|
||||
/* Called with tlb_c.lock held */
|
||||
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
|
||||
target_ulong page)
|
||||
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
|
||||
target_ulong page,
|
||||
target_ulong mask)
|
||||
{
|
||||
CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
|
||||
int k;
|
||||
|
||||
assert_cpu_is_self(env_cpu(env));
|
||||
for (k = 0; k < CPU_VTLB_SIZE; k++) {
|
||||
if (tlb_flush_entry_locked(&d->vtable[k], page)) {
|
||||
if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
|
||||
tlb_n_used_entries_dec(env, mmu_idx);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
|
||||
target_ulong page)
|
||||
{
|
||||
tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
|
||||
}
|
||||
|
||||
static void tlb_flush_page_locked(CPUArchState *env, int midx,
|
||||
target_ulong page)
|
||||
{
|
||||
|
@ -666,6 +689,240 @@ void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
|
|||
tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
|
||||
}
|
||||
|
||||
static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
|
||||
target_ulong page, unsigned bits)
|
||||
{
|
||||
CPUTLBDesc *d = &env_tlb(env)->d[midx];
|
||||
CPUTLBDescFast *f = &env_tlb(env)->f[midx];
|
||||
target_ulong mask = MAKE_64BIT_MASK(0, bits);
|
||||
|
||||
/*
|
||||
* If @bits is smaller than the tlb size, there may be multiple entries
|
||||
* within the TLB; otherwise all addresses that match under @mask hit
|
||||
* the same TLB entry.
|
||||
*
|
||||
* TODO: Perhaps allow bits to be a few bits less than the size.
|
||||
* For now, just flush the entire TLB.
|
||||
*/
|
||||
if (mask < f->mask) {
|
||||
tlb_debug("forcing full flush midx %d ("
|
||||
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
|
||||
midx, page, mask);
|
||||
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
|
||||
return;
|
||||
}
|
||||
|
||||
/* Check if we need to flush due to large pages. */
|
||||
if ((page & d->large_page_mask) == d->large_page_addr) {
|
||||
tlb_debug("forcing full flush midx %d ("
|
||||
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
|
||||
midx, d->large_page_addr, d->large_page_mask);
|
||||
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
|
||||
return;
|
||||
}
|
||||
|
||||
if (tlb_flush_entry_mask_locked(tlb_entry(env, midx, page), page, mask)) {
|
||||
tlb_n_used_entries_dec(env, midx);
|
||||
}
|
||||
tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
target_ulong addr;
|
||||
uint16_t idxmap;
|
||||
uint16_t bits;
|
||||
} TLBFlushPageBitsByMMUIdxData;
|
||||
|
||||
static void
|
||||
tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu,
|
||||
TLBFlushPageBitsByMMUIdxData d)
|
||||
{
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
int mmu_idx;
|
||||
|
||||
assert_cpu_is_self(cpu);
|
||||
|
||||
tlb_debug("page addr:" TARGET_FMT_lx "/%u mmu_map:0x%x\n",
|
||||
d.addr, d.bits, d.idxmap);
|
||||
|
||||
qemu_spin_lock(&env_tlb(env)->c.lock);
|
||||
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
|
||||
if ((d.idxmap >> mmu_idx) & 1) {
|
||||
tlb_flush_page_bits_locked(env, mmu_idx, d.addr, d.bits);
|
||||
}
|
||||
}
|
||||
qemu_spin_unlock(&env_tlb(env)->c.lock);
|
||||
|
||||
tb_flush_jmp_cache(cpu, d.addr);
|
||||
}
|
||||
|
||||
static bool encode_pbm_to_runon(run_on_cpu_data *out,
|
||||
TLBFlushPageBitsByMMUIdxData d)
|
||||
{
|
||||
/* We need 6 bits to hold to hold @bits up to 63. */
|
||||
if (d.idxmap <= MAKE_64BIT_MASK(0, TARGET_PAGE_BITS - 6)) {
|
||||
*out = RUN_ON_CPU_TARGET_PTR(d.addr | (d.idxmap << 6) | d.bits);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static TLBFlushPageBitsByMMUIdxData
|
||||
decode_runon_to_pbm(run_on_cpu_data data)
|
||||
{
|
||||
target_ulong addr_map_bits = (target_ulong) data.target_ptr;
|
||||
return (TLBFlushPageBitsByMMUIdxData){
|
||||
.addr = addr_map_bits & TARGET_PAGE_MASK,
|
||||
.idxmap = (addr_map_bits & ~TARGET_PAGE_MASK) >> 6,
|
||||
.bits = addr_map_bits & 0x3f
|
||||
};
|
||||
}
|
||||
|
||||
static void tlb_flush_page_bits_by_mmuidx_async_1(CPUState *cpu,
|
||||
run_on_cpu_data runon)
|
||||
{
|
||||
tlb_flush_page_bits_by_mmuidx_async_0(cpu, decode_runon_to_pbm(runon));
|
||||
}
|
||||
|
||||
static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu,
|
||||
run_on_cpu_data data)
|
||||
{
|
||||
TLBFlushPageBitsByMMUIdxData *d = data.host_ptr;
|
||||
tlb_flush_page_bits_by_mmuidx_async_0(cpu, *d);
|
||||
g_free(d);
|
||||
}
|
||||
|
||||
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
|
||||
uint16_t idxmap, unsigned bits)
|
||||
{
|
||||
TLBFlushPageBitsByMMUIdxData d;
|
||||
run_on_cpu_data runon;
|
||||
|
||||
/* If all bits are significant, this devolves to tlb_flush_page. */
|
||||
if (bits >= TARGET_LONG_BITS) {
|
||||
tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
|
||||
return;
|
||||
}
|
||||
/* If no page bits are significant, this devolves to tlb_flush. */
|
||||
if (bits < TARGET_PAGE_BITS) {
|
||||
tlb_flush_by_mmuidx(cpu, idxmap);
|
||||
return;
|
||||
}
|
||||
|
||||
/* This should already be page aligned */
|
||||
d.addr = addr & TARGET_PAGE_MASK;
|
||||
d.idxmap = idxmap;
|
||||
d.bits = bits;
|
||||
|
||||
if (qemu_cpu_is_self(cpu)) {
|
||||
tlb_flush_page_bits_by_mmuidx_async_0(cpu, d);
|
||||
} else if (encode_pbm_to_runon(&runon, d)) {
|
||||
async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
|
||||
} else {
|
||||
TLBFlushPageBitsByMMUIdxData *p
|
||||
= g_new(TLBFlushPageBitsByMMUIdxData, 1);
|
||||
|
||||
/* Otherwise allocate a structure, freed by the worker. */
|
||||
*p = d;
|
||||
async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_2,
|
||||
RUN_ON_CPU_HOST_PTR(p));
|
||||
}
|
||||
}
|
||||
|
||||
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
|
||||
target_ulong addr,
|
||||
uint16_t idxmap,
|
||||
unsigned bits)
|
||||
{
|
||||
TLBFlushPageBitsByMMUIdxData d;
|
||||
run_on_cpu_data runon;
|
||||
|
||||
/* If all bits are significant, this devolves to tlb_flush_page. */
|
||||
if (bits >= TARGET_LONG_BITS) {
|
||||
tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
|
||||
return;
|
||||
}
|
||||
/* If no page bits are significant, this devolves to tlb_flush. */
|
||||
if (bits < TARGET_PAGE_BITS) {
|
||||
tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
|
||||
return;
|
||||
}
|
||||
|
||||
/* This should already be page aligned */
|
||||
d.addr = addr & TARGET_PAGE_MASK;
|
||||
d.idxmap = idxmap;
|
||||
d.bits = bits;
|
||||
|
||||
if (encode_pbm_to_runon(&runon, d)) {
|
||||
flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
|
||||
} else {
|
||||
CPUState *dst_cpu;
|
||||
TLBFlushPageBitsByMMUIdxData *p;
|
||||
|
||||
/* Allocate a separate data block for each destination cpu. */
|
||||
CPU_FOREACH(dst_cpu) {
|
||||
if (dst_cpu != src_cpu) {
|
||||
p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
|
||||
*p = d;
|
||||
async_run_on_cpu(dst_cpu,
|
||||
tlb_flush_page_bits_by_mmuidx_async_2,
|
||||
RUN_ON_CPU_HOST_PTR(p));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tlb_flush_page_bits_by_mmuidx_async_0(src_cpu, d);
|
||||
}
|
||||
|
||||
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
|
||||
target_ulong addr,
|
||||
uint16_t idxmap,
|
||||
unsigned bits)
|
||||
{
|
||||
TLBFlushPageBitsByMMUIdxData d;
|
||||
run_on_cpu_data runon;
|
||||
|
||||
/* If all bits are significant, this devolves to tlb_flush_page. */
|
||||
if (bits >= TARGET_LONG_BITS) {
|
||||
tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
|
||||
return;
|
||||
}
|
||||
/* If no page bits are significant, this devolves to tlb_flush. */
|
||||
if (bits < TARGET_PAGE_BITS) {
|
||||
tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
|
||||
return;
|
||||
}
|
||||
|
||||
/* This should already be page aligned */
|
||||
d.addr = addr & TARGET_PAGE_MASK;
|
||||
d.idxmap = idxmap;
|
||||
d.bits = bits;
|
||||
|
||||
if (encode_pbm_to_runon(&runon, d)) {
|
||||
flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
|
||||
async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1,
|
||||
runon);
|
||||
} else {
|
||||
CPUState *dst_cpu;
|
||||
TLBFlushPageBitsByMMUIdxData *p;
|
||||
|
||||
/* Allocate a separate data block for each destination cpu. */
|
||||
CPU_FOREACH(dst_cpu) {
|
||||
if (dst_cpu != src_cpu) {
|
||||
p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
|
||||
*p = d;
|
||||
async_run_on_cpu(dst_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
|
||||
RUN_ON_CPU_HOST_PTR(p));
|
||||
}
|
||||
}
|
||||
|
||||
p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
|
||||
*p = d;
|
||||
async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
|
||||
RUN_ON_CPU_HOST_PTR(p));
|
||||
}
|
||||
}
|
||||
|
||||
/* update the TLBs so that writes to code in the virtual page 'addr'
|
||||
can be detected */
|
||||
void tlb_protect_code(ram_addr_t ram_addr)
|
||||
|
|
|
@ -12,4 +12,11 @@ tcg_ss.add(when: 'CONFIG_SOFTMMU', if_false: files('user-exec-stub.c'))
|
|||
tcg_ss.add(when: 'CONFIG_PLUGIN', if_true: [files('plugin-gen.c'), libdl])
|
||||
specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
|
||||
|
||||
specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files('tcg-all.c', 'cputlb.c', 'tcg-cpus.c'))
|
||||
specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
|
||||
'tcg-all.c',
|
||||
'cputlb.c',
|
||||
'tcg-cpus.c',
|
||||
'tcg-cpus-mttcg.c',
|
||||
'tcg-cpus-icount.c',
|
||||
'tcg-cpus-rr.c'
|
||||
))
|
||||
|
|
|
@ -104,8 +104,19 @@ static int tcg_init(MachineState *ms)
|
|||
|
||||
tcg_exec_init(s->tb_size * 1024 * 1024);
|
||||
mttcg_enabled = s->mttcg_enabled;
|
||||
cpus_register_accel(&tcg_cpus);
|
||||
|
||||
/*
|
||||
* Initialize TCG regions
|
||||
*/
|
||||
tcg_region_init();
|
||||
|
||||
if (mttcg_enabled) {
|
||||
cpus_register_accel(&tcg_cpus_mttcg);
|
||||
} else if (icount_enabled()) {
|
||||
cpus_register_accel(&tcg_cpus_icount);
|
||||
} else {
|
||||
cpus_register_accel(&tcg_cpus_rr);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,147 @@
|
|||
/*
|
||||
* QEMU TCG Single Threaded vCPUs implementation using instruction counting
|
||||
*
|
||||
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||
* Copyright (c) 2014 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "sysemu/replay.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "qemu/guest-random.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "hw/boards.h"
|
||||
|
||||
#include "tcg-cpus.h"
|
||||
#include "tcg-cpus-icount.h"
|
||||
#include "tcg-cpus-rr.h"
|
||||
|
||||
static int64_t icount_get_limit(void)
|
||||
{
|
||||
int64_t deadline;
|
||||
|
||||
if (replay_mode != REPLAY_MODE_PLAY) {
|
||||
/*
|
||||
* Include all the timers, because they may need an attention.
|
||||
* Too long CPU execution may create unnecessary delay in UI.
|
||||
*/
|
||||
deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
|
||||
QEMU_TIMER_ATTR_ALL);
|
||||
/* Check realtime timers, because they help with input processing */
|
||||
deadline = qemu_soonest_timeout(deadline,
|
||||
qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME,
|
||||
QEMU_TIMER_ATTR_ALL));
|
||||
|
||||
/*
|
||||
* Maintain prior (possibly buggy) behaviour where if no deadline
|
||||
* was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
|
||||
* INT32_MAX nanoseconds ahead, we still use INT32_MAX
|
||||
* nanoseconds.
|
||||
*/
|
||||
if ((deadline < 0) || (deadline > INT32_MAX)) {
|
||||
deadline = INT32_MAX;
|
||||
}
|
||||
|
||||
return icount_round(deadline);
|
||||
} else {
|
||||
return replay_get_instructions();
|
||||
}
|
||||
}
|
||||
|
||||
static void icount_notify_aio_contexts(void)
|
||||
{
|
||||
/* Wake up other AioContexts. */
|
||||
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
|
||||
qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
|
||||
}
|
||||
|
||||
void icount_handle_deadline(void)
|
||||
{
|
||||
assert(qemu_in_vcpu_thread());
|
||||
int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
|
||||
QEMU_TIMER_ATTR_ALL);
|
||||
|
||||
if (deadline == 0) {
|
||||
icount_notify_aio_contexts();
|
||||
}
|
||||
}
|
||||
|
||||
void icount_prepare_for_run(CPUState *cpu)
|
||||
{
|
||||
int insns_left;
|
||||
|
||||
/*
|
||||
* These should always be cleared by icount_process_data after
|
||||
* each vCPU execution. However u16.high can be raised
|
||||
* asynchronously by cpu_exit/cpu_interrupt/tcg_cpus_handle_interrupt
|
||||
*/
|
||||
g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
|
||||
g_assert(cpu->icount_extra == 0);
|
||||
|
||||
cpu->icount_budget = icount_get_limit();
|
||||
insns_left = MIN(0xffff, cpu->icount_budget);
|
||||
cpu_neg(cpu)->icount_decr.u16.low = insns_left;
|
||||
cpu->icount_extra = cpu->icount_budget - insns_left;
|
||||
|
||||
replay_mutex_lock();
|
||||
|
||||
if (cpu->icount_budget == 0 && replay_has_checkpoint()) {
|
||||
icount_notify_aio_contexts();
|
||||
}
|
||||
}
|
||||
|
||||
void icount_process_data(CPUState *cpu)
|
||||
{
|
||||
/* Account for executed instructions */
|
||||
icount_update(cpu);
|
||||
|
||||
/* Reset the counters */
|
||||
cpu_neg(cpu)->icount_decr.u16.low = 0;
|
||||
cpu->icount_extra = 0;
|
||||
cpu->icount_budget = 0;
|
||||
|
||||
replay_account_executed_instructions();
|
||||
|
||||
replay_mutex_unlock();
|
||||
}
|
||||
|
||||
static void icount_handle_interrupt(CPUState *cpu, int mask)
|
||||
{
|
||||
int old_mask = cpu->interrupt_request;
|
||||
|
||||
tcg_cpus_handle_interrupt(cpu, mask);
|
||||
if (qemu_cpu_is_self(cpu) &&
|
||||
!cpu->can_do_io
|
||||
&& (mask & ~old_mask) != 0) {
|
||||
cpu_abort(cpu, "Raised interrupt while not in I/O function");
|
||||
}
|
||||
}
|
||||
|
||||
const CpusAccel tcg_cpus_icount = {
|
||||
.create_vcpu_thread = rr_start_vcpu_thread,
|
||||
.kick_vcpu_thread = rr_kick_vcpu_thread,
|
||||
|
||||
.handle_interrupt = icount_handle_interrupt,
|
||||
.get_virtual_clock = icount_get,
|
||||
.get_elapsed_ticks = icount_get,
|
||||
};
|
|
@ -0,0 +1,17 @@
|
|||
/*
|
||||
* QEMU TCG Single Threaded vCPUs implementation using instruction counting
|
||||
*
|
||||
* Copyright 2020 SUSE LLC
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
|
||||
#ifndef TCG_CPUS_ICOUNT_H
|
||||
#define TCG_CPUS_ICOUNT_H
|
||||
|
||||
void icount_handle_deadline(void);
|
||||
void icount_prepare_for_run(CPUState *cpu);
|
||||
void icount_process_data(CPUState *cpu);
|
||||
|
||||
#endif /* TCG_CPUS_ICOUNT_H */
|
|
@ -0,0 +1,140 @@
|
|||
/*
|
||||
* QEMU TCG Multi Threaded vCPUs implementation
|
||||
*
|
||||
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||
* Copyright (c) 2014 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "sysemu/replay.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "qemu/guest-random.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "hw/boards.h"
|
||||
|
||||
#include "tcg-cpus.h"
|
||||
|
||||
/*
|
||||
* In the multi-threaded case each vCPU has its own thread. The TLS
|
||||
* variable current_cpu can be used deep in the code to find the
|
||||
* current CPUState for a given thread.
|
||||
*/
|
||||
|
||||
static void *mttcg_cpu_thread_fn(void *arg)
|
||||
{
|
||||
CPUState *cpu = arg;
|
||||
|
||||
assert(tcg_enabled());
|
||||
g_assert(!icount_enabled());
|
||||
|
||||
rcu_register_thread();
|
||||
tcg_register_thread();
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
cpu->can_do_io = 1;
|
||||
current_cpu = cpu;
|
||||
cpu_thread_signal_created(cpu);
|
||||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
|
||||
/* process any pending work */
|
||||
cpu->exit_request = 1;
|
||||
|
||||
do {
|
||||
if (cpu_can_run(cpu)) {
|
||||
int r;
|
||||
qemu_mutex_unlock_iothread();
|
||||
r = tcg_cpus_exec(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
switch (r) {
|
||||
case EXCP_DEBUG:
|
||||
cpu_handle_guest_debug(cpu);
|
||||
break;
|
||||
case EXCP_HALTED:
|
||||
/*
|
||||
* during start-up the vCPU is reset and the thread is
|
||||
* kicked several times. If we don't ensure we go back
|
||||
* to sleep in the halted state we won't cleanly
|
||||
* start-up when the vCPU is enabled.
|
||||
*
|
||||
* cpu->halted should ensure we sleep in wait_io_event
|
||||
*/
|
||||
g_assert(cpu->halted);
|
||||
break;
|
||||
case EXCP_ATOMIC:
|
||||
qemu_mutex_unlock_iothread();
|
||||
cpu_exec_step_atomic(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
default:
|
||||
/* Ignore everything else? */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
qatomic_mb_set(&cpu->exit_request, 0);
|
||||
qemu_wait_io_event(cpu);
|
||||
} while (!cpu->unplug || cpu_can_run(cpu));
|
||||
|
||||
tcg_cpus_destroy(cpu);
|
||||
qemu_mutex_unlock_iothread();
|
||||
rcu_unregister_thread();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void mttcg_kick_vcpu_thread(CPUState *cpu)
|
||||
{
|
||||
cpu_exit(cpu);
|
||||
}
|
||||
|
||||
static void mttcg_start_vcpu_thread(CPUState *cpu)
|
||||
{
|
||||
char thread_name[VCPU_THREAD_NAME_SIZE];
|
||||
|
||||
g_assert(tcg_enabled());
|
||||
|
||||
parallel_cpus = (current_machine->smp.max_cpus > 1);
|
||||
|
||||
cpu->thread = g_malloc0(sizeof(QemuThread));
|
||||
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
|
||||
qemu_cond_init(cpu->halt_cond);
|
||||
|
||||
/* create a thread per vCPU with TCG (MTTCG) */
|
||||
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
|
||||
cpu->cpu_index);
|
||||
|
||||
qemu_thread_create(cpu->thread, thread_name, mttcg_cpu_thread_fn,
|
||||
cpu, QEMU_THREAD_JOINABLE);
|
||||
|
||||
#ifdef _WIN32
|
||||
cpu->hThread = qemu_thread_get_handle(cpu->thread);
|
||||
#endif
|
||||
}
|
||||
|
||||
const CpusAccel tcg_cpus_mttcg = {
|
||||
.create_vcpu_thread = mttcg_start_vcpu_thread,
|
||||
.kick_vcpu_thread = mttcg_kick_vcpu_thread,
|
||||
|
||||
.handle_interrupt = tcg_cpus_handle_interrupt,
|
||||
};
|
|
@ -0,0 +1,305 @@
|
|||
/*
|
||||
* QEMU TCG Single Threaded vCPUs implementation
|
||||
*
|
||||
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||
* Copyright (c) 2014 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "sysemu/replay.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "qemu/guest-random.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "hw/boards.h"
|
||||
|
||||
#include "tcg-cpus.h"
|
||||
#include "tcg-cpus-rr.h"
|
||||
#include "tcg-cpus-icount.h"
|
||||
|
||||
/* Kick all RR vCPUs */
|
||||
void rr_kick_vcpu_thread(CPUState *unused)
|
||||
{
|
||||
CPUState *cpu;
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
cpu_exit(cpu);
|
||||
};
|
||||
}
|
||||
|
||||
/*
|
||||
* TCG vCPU kick timer
|
||||
*
|
||||
* The kick timer is responsible for moving single threaded vCPU
|
||||
* emulation on to the next vCPU. If more than one vCPU is running a
|
||||
* timer event with force a cpu->exit so the next vCPU can get
|
||||
* scheduled.
|
||||
*
|
||||
* The timer is removed if all vCPUs are idle and restarted again once
|
||||
* idleness is complete.
|
||||
*/
|
||||
|
||||
static QEMUTimer *rr_kick_vcpu_timer;
|
||||
static CPUState *rr_current_cpu;
|
||||
|
||||
#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
|
||||
|
||||
static inline int64_t rr_next_kick_time(void)
|
||||
{
|
||||
return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
|
||||
}
|
||||
|
||||
/* Kick the currently round-robin scheduled vCPU to next */
|
||||
static void rr_kick_next_cpu(void)
|
||||
{
|
||||
CPUState *cpu;
|
||||
do {
|
||||
cpu = qatomic_mb_read(&rr_current_cpu);
|
||||
if (cpu) {
|
||||
cpu_exit(cpu);
|
||||
}
|
||||
} while (cpu != qatomic_mb_read(&rr_current_cpu));
|
||||
}
|
||||
|
||||
static void rr_kick_thread(void *opaque)
|
||||
{
|
||||
timer_mod(rr_kick_vcpu_timer, rr_next_kick_time());
|
||||
rr_kick_next_cpu();
|
||||
}
|
||||
|
||||
static void rr_start_kick_timer(void)
|
||||
{
|
||||
if (!rr_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
|
||||
rr_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
|
||||
rr_kick_thread, NULL);
|
||||
}
|
||||
if (rr_kick_vcpu_timer && !timer_pending(rr_kick_vcpu_timer)) {
|
||||
timer_mod(rr_kick_vcpu_timer, rr_next_kick_time());
|
||||
}
|
||||
}
|
||||
|
||||
static void rr_stop_kick_timer(void)
|
||||
{
|
||||
if (rr_kick_vcpu_timer && timer_pending(rr_kick_vcpu_timer)) {
|
||||
timer_del(rr_kick_vcpu_timer);
|
||||
}
|
||||
}
|
||||
|
||||
static void rr_wait_io_event(void)
|
||||
{
|
||||
CPUState *cpu;
|
||||
|
||||
while (all_cpu_threads_idle()) {
|
||||
rr_stop_kick_timer();
|
||||
qemu_cond_wait_iothread(first_cpu->halt_cond);
|
||||
}
|
||||
|
||||
rr_start_kick_timer();
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
qemu_wait_io_event_common(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Destroy any remaining vCPUs which have been unplugged and have
|
||||
* finished running
|
||||
*/
|
||||
static void rr_deal_with_unplugged_cpus(void)
|
||||
{
|
||||
CPUState *cpu;
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
if (cpu->unplug && !cpu_can_run(cpu)) {
|
||||
tcg_cpus_destroy(cpu);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* In the single-threaded case each vCPU is simulated in turn. If
|
||||
* there is more than a single vCPU we create a simple timer to kick
|
||||
* the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
|
||||
* This is done explicitly rather than relying on side-effects
|
||||
* elsewhere.
|
||||
*/
|
||||
|
||||
static void *rr_cpu_thread_fn(void *arg)
|
||||
{
|
||||
CPUState *cpu = arg;
|
||||
|
||||
assert(tcg_enabled());
|
||||
rcu_register_thread();
|
||||
tcg_register_thread();
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
cpu->can_do_io = 1;
|
||||
cpu_thread_signal_created(cpu);
|
||||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
|
||||
/* wait for initial kick-off after machine start */
|
||||
while (first_cpu->stopped) {
|
||||
qemu_cond_wait_iothread(first_cpu->halt_cond);
|
||||
|
||||
/* process any pending work */
|
||||
CPU_FOREACH(cpu) {
|
||||
current_cpu = cpu;
|
||||
qemu_wait_io_event_common(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
rr_start_kick_timer();
|
||||
|
||||
cpu = first_cpu;
|
||||
|
||||
/* process any pending work */
|
||||
cpu->exit_request = 1;
|
||||
|
||||
while (1) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
replay_mutex_lock();
|
||||
qemu_mutex_lock_iothread();
|
||||
|
||||
if (icount_enabled()) {
|
||||
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
|
||||
icount_account_warp_timer();
|
||||
/*
|
||||
* Run the timers here. This is much more efficient than
|
||||
* waking up the I/O thread and waiting for completion.
|
||||
*/
|
||||
icount_handle_deadline();
|
||||
}
|
||||
|
||||
replay_mutex_unlock();
|
||||
|
||||
if (!cpu) {
|
||||
cpu = first_cpu;
|
||||
}
|
||||
|
||||
while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
|
||||
|
||||
qatomic_mb_set(&rr_current_cpu, cpu);
|
||||
current_cpu = cpu;
|
||||
|
||||
qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
|
||||
(cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
|
||||
|
||||
if (cpu_can_run(cpu)) {
|
||||
int r;
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
if (icount_enabled()) {
|
||||
icount_prepare_for_run(cpu);
|
||||
}
|
||||
r = tcg_cpus_exec(cpu);
|
||||
if (icount_enabled()) {
|
||||
icount_process_data(cpu);
|
||||
}
|
||||
qemu_mutex_lock_iothread();
|
||||
|
||||
if (r == EXCP_DEBUG) {
|
||||
cpu_handle_guest_debug(cpu);
|
||||
break;
|
||||
} else if (r == EXCP_ATOMIC) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
cpu_exec_step_atomic(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
break;
|
||||
}
|
||||
} else if (cpu->stop) {
|
||||
if (cpu->unplug) {
|
||||
cpu = CPU_NEXT(cpu);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
cpu = CPU_NEXT(cpu);
|
||||
} /* while (cpu && !cpu->exit_request).. */
|
||||
|
||||
/* Does not need qatomic_mb_set because a spurious wakeup is okay. */
|
||||
qatomic_set(&rr_current_cpu, NULL);
|
||||
|
||||
if (cpu && cpu->exit_request) {
|
||||
qatomic_mb_set(&cpu->exit_request, 0);
|
||||
}
|
||||
|
||||
if (icount_enabled() && all_cpu_threads_idle()) {
|
||||
/*
|
||||
* When all cpus are sleeping (e.g in WFI), to avoid a deadlock
|
||||
* in the main_loop, wake it up in order to start the warp timer.
|
||||
*/
|
||||
qemu_notify_event();
|
||||
}
|
||||
|
||||
rr_wait_io_event();
|
||||
rr_deal_with_unplugged_cpus();
|
||||
}
|
||||
|
||||
rcu_unregister_thread();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void rr_start_vcpu_thread(CPUState *cpu)
|
||||
{
|
||||
char thread_name[VCPU_THREAD_NAME_SIZE];
|
||||
static QemuCond *single_tcg_halt_cond;
|
||||
static QemuThread *single_tcg_cpu_thread;
|
||||
|
||||
g_assert(tcg_enabled());
|
||||
parallel_cpus = false;
|
||||
|
||||
if (!single_tcg_cpu_thread) {
|
||||
cpu->thread = g_malloc0(sizeof(QemuThread));
|
||||
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
|
||||
qemu_cond_init(cpu->halt_cond);
|
||||
|
||||
/* share a single thread for all cpus with TCG */
|
||||
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
|
||||
qemu_thread_create(cpu->thread, thread_name,
|
||||
rr_cpu_thread_fn,
|
||||
cpu, QEMU_THREAD_JOINABLE);
|
||||
|
||||
single_tcg_halt_cond = cpu->halt_cond;
|
||||
single_tcg_cpu_thread = cpu->thread;
|
||||
#ifdef _WIN32
|
||||
cpu->hThread = qemu_thread_get_handle(cpu->thread);
|
||||
#endif
|
||||
} else {
|
||||
/* we share the thread */
|
||||
cpu->thread = single_tcg_cpu_thread;
|
||||
cpu->halt_cond = single_tcg_halt_cond;
|
||||
cpu->thread_id = first_cpu->thread_id;
|
||||
cpu->can_do_io = 1;
|
||||
cpu->created = true;
|
||||
}
|
||||
}
|
||||
|
||||
const CpusAccel tcg_cpus_rr = {
|
||||
.create_vcpu_thread = rr_start_vcpu_thread,
|
||||
.kick_vcpu_thread = rr_kick_vcpu_thread,
|
||||
|
||||
.handle_interrupt = tcg_cpus_handle_interrupt,
|
||||
};
|
|
@ -0,0 +1,21 @@
|
|||
/*
|
||||
* QEMU TCG Single Threaded vCPUs implementation
|
||||
*
|
||||
* Copyright 2020 SUSE LLC
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
|
||||
#ifndef TCG_CPUS_RR_H
|
||||
#define TCG_CPUS_RR_H
|
||||
|
||||
#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
|
||||
|
||||
/* Kick all RR vCPUs. */
|
||||
void rr_kick_vcpu_thread(CPUState *unused);
|
||||
|
||||
/* start the round robin vcpu thread */
|
||||
void rr_start_vcpu_thread(CPUState *cpu);
|
||||
|
||||
#endif /* TCG_CPUS_RR_H */
|
|
@ -1,5 +1,7 @@
|
|||
/*
|
||||
* QEMU System Emulator
|
||||
* QEMU TCG vCPU common functionality
|
||||
*
|
||||
* Functionality common to all TCG vCPU variants: mttcg, rr and icount.
|
||||
*
|
||||
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||
* Copyright (c) 2014 Red Hat Inc.
|
||||
|
@ -34,207 +36,19 @@
|
|||
|
||||
#include "tcg-cpus.h"
|
||||
|
||||
/* Kick all RR vCPUs */
|
||||
static void qemu_cpu_kick_rr_cpus(void)
|
||||
{
|
||||
CPUState *cpu;
|
||||
/* common functionality among all TCG variants */
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
cpu_exit(cpu);
|
||||
};
|
||||
void tcg_cpus_destroy(CPUState *cpu)
|
||||
{
|
||||
cpu_thread_signal_destroyed(cpu);
|
||||
}
|
||||
|
||||
static void tcg_kick_vcpu_thread(CPUState *cpu)
|
||||
{
|
||||
if (qemu_tcg_mttcg_enabled()) {
|
||||
cpu_exit(cpu);
|
||||
} else {
|
||||
qemu_cpu_kick_rr_cpus();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* TCG vCPU kick timer
|
||||
*
|
||||
* The kick timer is responsible for moving single threaded vCPU
|
||||
* emulation on to the next vCPU. If more than one vCPU is running a
|
||||
* timer event with force a cpu->exit so the next vCPU can get
|
||||
* scheduled.
|
||||
*
|
||||
* The timer is removed if all vCPUs are idle and restarted again once
|
||||
* idleness is complete.
|
||||
*/
|
||||
|
||||
static QEMUTimer *tcg_kick_vcpu_timer;
|
||||
static CPUState *tcg_current_rr_cpu;
|
||||
|
||||
#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
|
||||
|
||||
static inline int64_t qemu_tcg_next_kick(void)
|
||||
{
|
||||
return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
|
||||
}
|
||||
|
||||
/* Kick the currently round-robin scheduled vCPU to next */
|
||||
static void qemu_cpu_kick_rr_next_cpu(void)
|
||||
{
|
||||
CPUState *cpu;
|
||||
do {
|
||||
cpu = qatomic_mb_read(&tcg_current_rr_cpu);
|
||||
if (cpu) {
|
||||
cpu_exit(cpu);
|
||||
}
|
||||
} while (cpu != qatomic_mb_read(&tcg_current_rr_cpu));
|
||||
}
|
||||
|
||||
static void kick_tcg_thread(void *opaque)
|
||||
{
|
||||
timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
|
||||
qemu_cpu_kick_rr_next_cpu();
|
||||
}
|
||||
|
||||
static void start_tcg_kick_timer(void)
|
||||
{
|
||||
assert(!mttcg_enabled);
|
||||
if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
|
||||
tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
|
||||
kick_tcg_thread, NULL);
|
||||
}
|
||||
if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) {
|
||||
timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
|
||||
}
|
||||
}
|
||||
|
||||
static void stop_tcg_kick_timer(void)
|
||||
{
|
||||
assert(!mttcg_enabled);
|
||||
if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) {
|
||||
timer_del(tcg_kick_vcpu_timer);
|
||||
}
|
||||
}
|
||||
|
||||
static void qemu_tcg_destroy_vcpu(CPUState *cpu)
|
||||
{
|
||||
}
|
||||
|
||||
static void qemu_tcg_rr_wait_io_event(void)
|
||||
{
|
||||
CPUState *cpu;
|
||||
|
||||
while (all_cpu_threads_idle()) {
|
||||
stop_tcg_kick_timer();
|
||||
qemu_cond_wait_iothread(first_cpu->halt_cond);
|
||||
}
|
||||
|
||||
start_tcg_kick_timer();
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
qemu_wait_io_event_common(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
static int64_t tcg_get_icount_limit(void)
|
||||
{
|
||||
int64_t deadline;
|
||||
|
||||
if (replay_mode != REPLAY_MODE_PLAY) {
|
||||
/*
|
||||
* Include all the timers, because they may need an attention.
|
||||
* Too long CPU execution may create unnecessary delay in UI.
|
||||
*/
|
||||
deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
|
||||
QEMU_TIMER_ATTR_ALL);
|
||||
/* Check realtime timers, because they help with input processing */
|
||||
deadline = qemu_soonest_timeout(deadline,
|
||||
qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME,
|
||||
QEMU_TIMER_ATTR_ALL));
|
||||
|
||||
/*
|
||||
* Maintain prior (possibly buggy) behaviour where if no deadline
|
||||
* was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
|
||||
* INT32_MAX nanoseconds ahead, we still use INT32_MAX
|
||||
* nanoseconds.
|
||||
*/
|
||||
if ((deadline < 0) || (deadline > INT32_MAX)) {
|
||||
deadline = INT32_MAX;
|
||||
}
|
||||
|
||||
return icount_round(deadline);
|
||||
} else {
|
||||
return replay_get_instructions();
|
||||
}
|
||||
}
|
||||
|
||||
static void notify_aio_contexts(void)
|
||||
{
|
||||
/* Wake up other AioContexts. */
|
||||
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
|
||||
qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
|
||||
}
|
||||
|
||||
static void handle_icount_deadline(void)
|
||||
{
|
||||
assert(qemu_in_vcpu_thread());
|
||||
if (icount_enabled()) {
|
||||
int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
|
||||
QEMU_TIMER_ATTR_ALL);
|
||||
|
||||
if (deadline == 0) {
|
||||
notify_aio_contexts();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void prepare_icount_for_run(CPUState *cpu)
|
||||
{
|
||||
if (icount_enabled()) {
|
||||
int insns_left;
|
||||
|
||||
/*
|
||||
* These should always be cleared by process_icount_data after
|
||||
* each vCPU execution. However u16.high can be raised
|
||||
* asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
|
||||
*/
|
||||
g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
|
||||
g_assert(cpu->icount_extra == 0);
|
||||
|
||||
cpu->icount_budget = tcg_get_icount_limit();
|
||||
insns_left = MIN(0xffff, cpu->icount_budget);
|
||||
cpu_neg(cpu)->icount_decr.u16.low = insns_left;
|
||||
cpu->icount_extra = cpu->icount_budget - insns_left;
|
||||
|
||||
replay_mutex_lock();
|
||||
|
||||
if (cpu->icount_budget == 0 && replay_has_checkpoint()) {
|
||||
notify_aio_contexts();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void process_icount_data(CPUState *cpu)
|
||||
{
|
||||
if (icount_enabled()) {
|
||||
/* Account for executed instructions */
|
||||
icount_update(cpu);
|
||||
|
||||
/* Reset the counters */
|
||||
cpu_neg(cpu)->icount_decr.u16.low = 0;
|
||||
cpu->icount_extra = 0;
|
||||
cpu->icount_budget = 0;
|
||||
|
||||
replay_account_executed_instructions();
|
||||
|
||||
replay_mutex_unlock();
|
||||
}
|
||||
}
|
||||
|
||||
static int tcg_cpu_exec(CPUState *cpu)
|
||||
int tcg_cpus_exec(CPUState *cpu)
|
||||
{
|
||||
int ret;
|
||||
#ifdef CONFIG_PROFILER
|
||||
int64_t ti;
|
||||
#endif
|
||||
|
||||
assert(tcg_enabled());
|
||||
#ifdef CONFIG_PROFILER
|
||||
ti = profile_getclock();
|
||||
|
@ -249,298 +63,11 @@ static int tcg_cpu_exec(CPUState *cpu)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Destroy any remaining vCPUs which have been unplugged and have
|
||||
* finished running
|
||||
*/
|
||||
static void deal_with_unplugged_cpus(void)
|
||||
{
|
||||
CPUState *cpu;
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
if (cpu->unplug && !cpu_can_run(cpu)) {
|
||||
qemu_tcg_destroy_vcpu(cpu);
|
||||
cpu_thread_signal_destroyed(cpu);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Single-threaded TCG
|
||||
*
|
||||
* In the single-threaded case each vCPU is simulated in turn. If
|
||||
* there is more than a single vCPU we create a simple timer to kick
|
||||
* the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
|
||||
* This is done explicitly rather than relying on side-effects
|
||||
* elsewhere.
|
||||
*/
|
||||
|
||||
static void *tcg_rr_cpu_thread_fn(void *arg)
|
||||
{
|
||||
CPUState *cpu = arg;
|
||||
|
||||
assert(tcg_enabled());
|
||||
rcu_register_thread();
|
||||
tcg_register_thread();
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
cpu->can_do_io = 1;
|
||||
cpu_thread_signal_created(cpu);
|
||||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
|
||||
/* wait for initial kick-off after machine start */
|
||||
while (first_cpu->stopped) {
|
||||
qemu_cond_wait_iothread(first_cpu->halt_cond);
|
||||
|
||||
/* process any pending work */
|
||||
CPU_FOREACH(cpu) {
|
||||
current_cpu = cpu;
|
||||
qemu_wait_io_event_common(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
start_tcg_kick_timer();
|
||||
|
||||
cpu = first_cpu;
|
||||
|
||||
/* process any pending work */
|
||||
cpu->exit_request = 1;
|
||||
|
||||
while (1) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
replay_mutex_lock();
|
||||
qemu_mutex_lock_iothread();
|
||||
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
|
||||
icount_account_warp_timer();
|
||||
|
||||
/*
|
||||
* Run the timers here. This is much more efficient than
|
||||
* waking up the I/O thread and waiting for completion.
|
||||
*/
|
||||
handle_icount_deadline();
|
||||
|
||||
replay_mutex_unlock();
|
||||
|
||||
if (!cpu) {
|
||||
cpu = first_cpu;
|
||||
}
|
||||
|
||||
while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
|
||||
|
||||
qatomic_mb_set(&tcg_current_rr_cpu, cpu);
|
||||
current_cpu = cpu;
|
||||
|
||||
qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
|
||||
(cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
|
||||
|
||||
if (cpu_can_run(cpu)) {
|
||||
int r;
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
prepare_icount_for_run(cpu);
|
||||
|
||||
r = tcg_cpu_exec(cpu);
|
||||
|
||||
process_icount_data(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
|
||||
if (r == EXCP_DEBUG) {
|
||||
cpu_handle_guest_debug(cpu);
|
||||
break;
|
||||
} else if (r == EXCP_ATOMIC) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
cpu_exec_step_atomic(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
break;
|
||||
}
|
||||
} else if (cpu->stop) {
|
||||
if (cpu->unplug) {
|
||||
cpu = CPU_NEXT(cpu);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
cpu = CPU_NEXT(cpu);
|
||||
} /* while (cpu && !cpu->exit_request).. */
|
||||
|
||||
/* Does not need qatomic_mb_set because a spurious wakeup is okay. */
|
||||
qatomic_set(&tcg_current_rr_cpu, NULL);
|
||||
|
||||
if (cpu && cpu->exit_request) {
|
||||
qatomic_mb_set(&cpu->exit_request, 0);
|
||||
}
|
||||
|
||||
if (icount_enabled() && all_cpu_threads_idle()) {
|
||||
/*
|
||||
* When all cpus are sleeping (e.g in WFI), to avoid a deadlock
|
||||
* in the main_loop, wake it up in order to start the warp timer.
|
||||
*/
|
||||
qemu_notify_event();
|
||||
}
|
||||
|
||||
qemu_tcg_rr_wait_io_event();
|
||||
deal_with_unplugged_cpus();
|
||||
}
|
||||
|
||||
rcu_unregister_thread();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Multi-threaded TCG
|
||||
*
|
||||
* In the multi-threaded case each vCPU has its own thread. The TLS
|
||||
* variable current_cpu can be used deep in the code to find the
|
||||
* current CPUState for a given thread.
|
||||
*/
|
||||
|
||||
static void *tcg_cpu_thread_fn(void *arg)
|
||||
{
|
||||
CPUState *cpu = arg;
|
||||
|
||||
assert(tcg_enabled());
|
||||
g_assert(!icount_enabled());
|
||||
|
||||
rcu_register_thread();
|
||||
tcg_register_thread();
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
cpu->can_do_io = 1;
|
||||
current_cpu = cpu;
|
||||
cpu_thread_signal_created(cpu);
|
||||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
|
||||
/* process any pending work */
|
||||
cpu->exit_request = 1;
|
||||
|
||||
do {
|
||||
if (cpu_can_run(cpu)) {
|
||||
int r;
|
||||
qemu_mutex_unlock_iothread();
|
||||
r = tcg_cpu_exec(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
switch (r) {
|
||||
case EXCP_DEBUG:
|
||||
cpu_handle_guest_debug(cpu);
|
||||
break;
|
||||
case EXCP_HALTED:
|
||||
/*
|
||||
* during start-up the vCPU is reset and the thread is
|
||||
* kicked several times. If we don't ensure we go back
|
||||
* to sleep in the halted state we won't cleanly
|
||||
* start-up when the vCPU is enabled.
|
||||
*
|
||||
* cpu->halted should ensure we sleep in wait_io_event
|
||||
*/
|
||||
g_assert(cpu->halted);
|
||||
break;
|
||||
case EXCP_ATOMIC:
|
||||
qemu_mutex_unlock_iothread();
|
||||
cpu_exec_step_atomic(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
default:
|
||||
/* Ignore everything else? */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
qatomic_mb_set(&cpu->exit_request, 0);
|
||||
qemu_wait_io_event(cpu);
|
||||
} while (!cpu->unplug || cpu_can_run(cpu));
|
||||
|
||||
qemu_tcg_destroy_vcpu(cpu);
|
||||
cpu_thread_signal_destroyed(cpu);
|
||||
qemu_mutex_unlock_iothread();
|
||||
rcu_unregister_thread();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void tcg_start_vcpu_thread(CPUState *cpu)
|
||||
{
|
||||
char thread_name[VCPU_THREAD_NAME_SIZE];
|
||||
static QemuCond *single_tcg_halt_cond;
|
||||
static QemuThread *single_tcg_cpu_thread;
|
||||
static int tcg_region_inited;
|
||||
|
||||
assert(tcg_enabled());
|
||||
/*
|
||||
* Initialize TCG regions--once. Now is a good time, because:
|
||||
* (1) TCG's init context, prologue and target globals have been set up.
|
||||
* (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
|
||||
* -accel flag is processed, so the check doesn't work then).
|
||||
*/
|
||||
if (!tcg_region_inited) {
|
||||
tcg_region_inited = 1;
|
||||
tcg_region_init();
|
||||
parallel_cpus = qemu_tcg_mttcg_enabled() && current_machine->smp.max_cpus > 1;
|
||||
}
|
||||
|
||||
if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
|
||||
cpu->thread = g_malloc0(sizeof(QemuThread));
|
||||
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
|
||||
qemu_cond_init(cpu->halt_cond);
|
||||
|
||||
if (qemu_tcg_mttcg_enabled()) {
|
||||
/* create a thread per vCPU with TCG (MTTCG) */
|
||||
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
|
||||
cpu->cpu_index);
|
||||
|
||||
qemu_thread_create(cpu->thread, thread_name, tcg_cpu_thread_fn,
|
||||
cpu, QEMU_THREAD_JOINABLE);
|
||||
|
||||
} else {
|
||||
/* share a single thread for all cpus with TCG */
|
||||
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
|
||||
qemu_thread_create(cpu->thread, thread_name,
|
||||
tcg_rr_cpu_thread_fn,
|
||||
cpu, QEMU_THREAD_JOINABLE);
|
||||
|
||||
single_tcg_halt_cond = cpu->halt_cond;
|
||||
single_tcg_cpu_thread = cpu->thread;
|
||||
}
|
||||
#ifdef _WIN32
|
||||
cpu->hThread = qemu_thread_get_handle(cpu->thread);
|
||||
#endif
|
||||
} else {
|
||||
/* For non-MTTCG cases we share the thread */
|
||||
cpu->thread = single_tcg_cpu_thread;
|
||||
cpu->halt_cond = single_tcg_halt_cond;
|
||||
cpu->thread_id = first_cpu->thread_id;
|
||||
cpu->can_do_io = 1;
|
||||
cpu->created = true;
|
||||
}
|
||||
}
|
||||
|
||||
static int64_t tcg_get_virtual_clock(void)
|
||||
{
|
||||
if (icount_enabled()) {
|
||||
return icount_get();
|
||||
}
|
||||
return cpu_get_clock();
|
||||
}
|
||||
|
||||
static int64_t tcg_get_elapsed_ticks(void)
|
||||
{
|
||||
if (icount_enabled()) {
|
||||
return icount_get();
|
||||
}
|
||||
return cpu_get_ticks();
|
||||
}
|
||||
|
||||
/* mask must never be zero, except for A20 change call */
|
||||
static void tcg_handle_interrupt(CPUState *cpu, int mask)
|
||||
void tcg_cpus_handle_interrupt(CPUState *cpu, int mask)
|
||||
{
|
||||
int old_mask;
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
|
||||
old_mask = cpu->interrupt_request;
|
||||
cpu->interrupt_request |= mask;
|
||||
|
||||
/*
|
||||
|
@ -551,20 +78,5 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
|
|||
qemu_cpu_kick(cpu);
|
||||
} else {
|
||||
qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
|
||||
if (icount_enabled() &&
|
||||
!cpu->can_do_io
|
||||
&& (mask & ~old_mask) != 0) {
|
||||
cpu_abort(cpu, "Raised interrupt while not in I/O function");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const CpusAccel tcg_cpus = {
|
||||
.create_vcpu_thread = tcg_start_vcpu_thread,
|
||||
.kick_vcpu_thread = tcg_kick_vcpu_thread,
|
||||
|
||||
.handle_interrupt = tcg_handle_interrupt,
|
||||
|
||||
.get_virtual_clock = tcg_get_virtual_clock,
|
||||
.get_elapsed_ticks = tcg_get_elapsed_ticks,
|
||||
};
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
/*
|
||||
* Accelerator CPUS Interface
|
||||
* QEMU TCG vCPU common functionality
|
||||
*
|
||||
* Functionality common to all TCG vcpu variants: mttcg, rr and icount.
|
||||
*
|
||||
* Copyright 2020 SUSE LLC
|
||||
*
|
||||
|
@ -12,6 +14,12 @@
|
|||
|
||||
#include "sysemu/cpus.h"
|
||||
|
||||
extern const CpusAccel tcg_cpus;
|
||||
extern const CpusAccel tcg_cpus_mttcg;
|
||||
extern const CpusAccel tcg_cpus_icount;
|
||||
extern const CpusAccel tcg_cpus_rr;
|
||||
|
||||
void tcg_cpus_destroy(CPUState *cpu);
|
||||
int tcg_cpus_exec(CPUState *cpu);
|
||||
void tcg_cpus_handle_interrupt(CPUState *cpu, int mask);
|
||||
|
||||
#endif /* TCG_CPUS_H */
|
||||
|
|
|
@ -2267,6 +2267,10 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
|
|||
tb_destroy(tb);
|
||||
}
|
||||
|
||||
qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
|
||||
"cpu_io_recompile: rewound execution of TB to "
|
||||
TARGET_FMT_lx "\n", tb->pc);
|
||||
|
||||
/* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
|
||||
* the first in the TB) then we end up generating a whole new TB and
|
||||
* repeating the fault, which is horribly inefficient.
|
||||
|
@ -2375,7 +2379,7 @@ void dump_exec_info(void)
|
|||
qemu_printf("Translation buffer state:\n");
|
||||
/*
|
||||
* Report total code size including the padding and TB structs;
|
||||
* otherwise users might think "-tb-size" is not honoured.
|
||||
* otherwise users might think "-accel tcg,tb-size" is not honoured.
|
||||
* For avg host size we use the precise numbers from tb_tree_stats though.
|
||||
*/
|
||||
qemu_printf("gen code size %zu/%zu\n",
|
||||
|
|
|
@ -9,6 +9,10 @@ void cpu_resume(CPUState *cpu)
|
|||
{
|
||||
}
|
||||
|
||||
void cpu_remove_sync(CPUState *cpu)
|
||||
{
|
||||
}
|
||||
|
||||
void qemu_init_vcpu(CPUState *cpu)
|
||||
{
|
||||
}
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
#include "hw/xen/xen_pt.h"
|
||||
#include "chardev/char.h"
|
||||
#include "sysemu/accel.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "sysemu/xen.h"
|
||||
#include "sysemu/runstate.h"
|
||||
#include "migration/misc.h"
|
||||
|
@ -153,6 +154,10 @@ static void xen_setup_post(MachineState *ms, AccelState *accel)
|
|||
}
|
||||
}
|
||||
|
||||
const CpusAccel xen_cpus = {
|
||||
.create_vcpu_thread = dummy_start_vcpu_thread,
|
||||
};
|
||||
|
||||
static int xen_init(MachineState *ms)
|
||||
{
|
||||
MachineClass *mc = MACHINE_GET_CLASS(ms);
|
||||
|
@ -180,6 +185,9 @@ static int xen_init(MachineState *ms)
|
|||
* opt out of system RAM being allocated by generic code
|
||||
*/
|
||||
mc->default_ram_id = NULL;
|
||||
|
||||
cpus_register_accel(&xen_cpus);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -104,9 +104,6 @@ const struct mixeng_volume nominal_volume = {
|
|||
|
||||
static bool legacy_config = true;
|
||||
|
||||
#ifdef AUDIO_IS_FLAWLESS_AND_NO_CHECKS_ARE_REQURIED
|
||||
#error No its not
|
||||
#else
|
||||
int audio_bug (const char *funcname, int cond)
|
||||
{
|
||||
if (cond) {
|
||||
|
@ -119,25 +116,11 @@ int audio_bug (const char *funcname, int cond)
|
|||
AUD_log (NULL, "I am sorry\n");
|
||||
}
|
||||
AUD_log (NULL, "Context:\n");
|
||||
|
||||
#if defined AUDIO_BREAKPOINT_ON_BUG
|
||||
# if defined HOST_I386
|
||||
# if defined __GNUC__
|
||||
__asm__ ("int3");
|
||||
# elif defined _MSC_VER
|
||||
_asm _emit 0xcc;
|
||||
# else
|
||||
abort ();
|
||||
# endif
|
||||
# else
|
||||
abort ();
|
||||
# endif
|
||||
#endif
|
||||
abort();
|
||||
}
|
||||
|
||||
return cond;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline int audio_bits_to_index (int bits)
|
||||
{
|
||||
|
@ -1588,13 +1571,6 @@ static void audio_vm_change_state_handler (void *opaque, int running,
|
|||
audio_reset_timer (s);
|
||||
}
|
||||
|
||||
static bool is_cleaning_up;
|
||||
|
||||
bool audio_is_cleaning_up(void)
|
||||
{
|
||||
return is_cleaning_up;
|
||||
}
|
||||
|
||||
static void free_audio_state(AudioState *s)
|
||||
{
|
||||
HWVoiceOut *hwo, *hwon;
|
||||
|
@ -1647,7 +1623,6 @@ static void free_audio_state(AudioState *s)
|
|||
|
||||
void audio_cleanup(void)
|
||||
{
|
||||
is_cleaning_up = true;
|
||||
while (!QTAILQ_EMPTY(&audio_states)) {
|
||||
AudioState *s = QTAILQ_FIRST(&audio_states);
|
||||
QTAILQ_REMOVE(&audio_states, s, list);
|
||||
|
@ -1709,7 +1684,9 @@ static AudioState *audio_init(Audiodev *dev, const char *name)
|
|||
* backend and this can go away.
|
||||
*/
|
||||
driver = audio_driver_lookup("spice");
|
||||
driver->can_be_default = 1;
|
||||
if (driver) {
|
||||
driver->can_be_default = 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (dev) {
|
||||
|
|
|
@ -160,7 +160,6 @@ static inline void *advance (void *p, int incr)
|
|||
int wav_start_capture(AudioState *state, CaptureState *s, const char *path,
|
||||
int freq, int bits, int nchannels);
|
||||
|
||||
bool audio_is_cleaning_up(void);
|
||||
void audio_cleanup(void);
|
||||
|
||||
void audio_sample_to_uint64(const void *samples, int pos,
|
||||
|
|
|
@ -482,7 +482,7 @@ static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as,
|
|||
Audiodev *dev = drv_opaque;
|
||||
AudiodevCoreaudioPerDirectionOptions *cpdo = dev->u.coreaudio.out;
|
||||
int frames;
|
||||
struct audsettings fake_as;
|
||||
struct audsettings obt_as;
|
||||
|
||||
/* create mutex */
|
||||
err = pthread_mutex_init(&core->mutex, NULL);
|
||||
|
@ -491,8 +491,8 @@ static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as,
|
|||
return -1;
|
||||
}
|
||||
|
||||
fake_as = *as;
|
||||
as = &fake_as;
|
||||
obt_as = *as;
|
||||
as = &obt_as;
|
||||
as->fmt = AUDIO_FORMAT_F32;
|
||||
audio_pcm_init_info (&hw->info, as);
|
||||
|
||||
|
@ -584,17 +584,6 @@ static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as,
|
|||
return -1;
|
||||
}
|
||||
|
||||
/* start Playback */
|
||||
if (!isPlaying(core->outputDeviceID)) {
|
||||
status = AudioDeviceStart(core->outputDeviceID, core->ioprocid);
|
||||
if (status != kAudioHardwareNoError) {
|
||||
coreaudio_logerr2 (status, typ, "Could not start playback\n");
|
||||
AudioDeviceDestroyIOProcID(core->outputDeviceID, core->ioprocid);
|
||||
core->outputDeviceID = kAudioDeviceUnknown;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -604,22 +593,20 @@ static void coreaudio_fini_out (HWVoiceOut *hw)
|
|||
int err;
|
||||
coreaudioVoiceOut *core = (coreaudioVoiceOut *) hw;
|
||||
|
||||
if (!audio_is_cleaning_up()) {
|
||||
/* stop playback */
|
||||
if (isPlaying(core->outputDeviceID)) {
|
||||
status = AudioDeviceStop(core->outputDeviceID, core->ioprocid);
|
||||
if (status != kAudioHardwareNoError) {
|
||||
coreaudio_logerr (status, "Could not stop playback\n");
|
||||
}
|
||||
}
|
||||
|
||||
/* remove callback */
|
||||
status = AudioDeviceDestroyIOProcID(core->outputDeviceID,
|
||||
core->ioprocid);
|
||||
/* stop playback */
|
||||
if (isPlaying(core->outputDeviceID)) {
|
||||
status = AudioDeviceStop(core->outputDeviceID, core->ioprocid);
|
||||
if (status != kAudioHardwareNoError) {
|
||||
coreaudio_logerr (status, "Could not remove IOProc\n");
|
||||
coreaudio_logerr(status, "Could not stop playback\n");
|
||||
}
|
||||
}
|
||||
|
||||
/* remove callback */
|
||||
status = AudioDeviceDestroyIOProcID(core->outputDeviceID,
|
||||
core->ioprocid);
|
||||
if (status != kAudioHardwareNoError) {
|
||||
coreaudio_logerr(status, "Could not remove IOProc\n");
|
||||
}
|
||||
core->outputDeviceID = kAudioDeviceUnknown;
|
||||
|
||||
/* destroy mutex */
|
||||
|
@ -644,13 +631,11 @@ static void coreaudio_enable_out(HWVoiceOut *hw, bool enable)
|
|||
}
|
||||
} else {
|
||||
/* stop playback */
|
||||
if (!audio_is_cleaning_up()) {
|
||||
if (isPlaying(core->outputDeviceID)) {
|
||||
status = AudioDeviceStop(core->outputDeviceID,
|
||||
core->ioprocid);
|
||||
if (status != kAudioHardwareNoError) {
|
||||
coreaudio_logerr (status, "Could not pause playback\n");
|
||||
}
|
||||
if (isPlaying(core->outputDeviceID)) {
|
||||
status = AudioDeviceStop(core->outputDeviceID,
|
||||
core->ioprocid);
|
||||
if (status != kAudioHardwareNoError) {
|
||||
coreaudio_logerr(status, "Could not pause playback\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include "qemu/osdep.h"
|
||||
#include "qemu/module.h"
|
||||
#include "qemu/atomic.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "qemu-common.h"
|
||||
#include "audio.h"
|
||||
|
||||
|
@ -63,6 +64,7 @@ typedef struct QJackClient {
|
|||
QJackState state;
|
||||
jack_client_t *client;
|
||||
jack_nframes_t freq;
|
||||
QEMUBH *shutdown_bh;
|
||||
|
||||
struct QJack *j;
|
||||
int nchannels;
|
||||
|
@ -87,6 +89,7 @@ QJackIn;
|
|||
static int qjack_client_init(QJackClient *c);
|
||||
static void qjack_client_connect_ports(QJackClient *c);
|
||||
static void qjack_client_fini(QJackClient *c);
|
||||
static QemuMutex qjack_shutdown_lock;
|
||||
|
||||
static void qjack_buffer_create(QJackBuffer *buffer, int channels, int frames)
|
||||
{
|
||||
|
@ -306,21 +309,27 @@ static int qjack_xrun(void *arg)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void qjack_shutdown_bh(void *opaque)
|
||||
{
|
||||
QJackClient *c = (QJackClient *)opaque;
|
||||
qjack_client_fini(c);
|
||||
}
|
||||
|
||||
static void qjack_shutdown(void *arg)
|
||||
{
|
||||
QJackClient *c = (QJackClient *)arg;
|
||||
c->state = QJACK_STATE_SHUTDOWN;
|
||||
qemu_bh_schedule(c->shutdown_bh);
|
||||
}
|
||||
|
||||
static void qjack_client_recover(QJackClient *c)
|
||||
{
|
||||
if (c->state == QJACK_STATE_SHUTDOWN) {
|
||||
qjack_client_fini(c);
|
||||
if (c->state != QJACK_STATE_DISCONNECTED) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* packets is used simply to throttle this */
|
||||
if (c->state == QJACK_STATE_DISCONNECTED &&
|
||||
c->packets % 100 == 0) {
|
||||
if (c->packets % 100 == 0) {
|
||||
|
||||
/* if enabled then attempt to recover */
|
||||
if (c->enabled) {
|
||||
|
@ -489,15 +498,16 @@ static int qjack_init_out(HWVoiceOut *hw, struct audsettings *as,
|
|||
QJackOut *jo = (QJackOut *)hw;
|
||||
Audiodev *dev = (Audiodev *)drv_opaque;
|
||||
|
||||
qjack_client_fini(&jo->c);
|
||||
|
||||
jo->c.out = true;
|
||||
jo->c.enabled = false;
|
||||
jo->c.nchannels = as->nchannels;
|
||||
jo->c.opt = dev->u.jack.out;
|
||||
|
||||
jo->c.shutdown_bh = qemu_bh_new(qjack_shutdown_bh, &jo->c);
|
||||
|
||||
int ret = qjack_client_init(&jo->c);
|
||||
if (ret != 0) {
|
||||
qemu_bh_delete(jo->c.shutdown_bh);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -525,15 +535,16 @@ static int qjack_init_in(HWVoiceIn *hw, struct audsettings *as,
|
|||
QJackIn *ji = (QJackIn *)hw;
|
||||
Audiodev *dev = (Audiodev *)drv_opaque;
|
||||
|
||||
qjack_client_fini(&ji->c);
|
||||
|
||||
ji->c.out = false;
|
||||
ji->c.enabled = false;
|
||||
ji->c.nchannels = as->nchannels;
|
||||
ji->c.opt = dev->u.jack.in;
|
||||
|
||||
ji->c.shutdown_bh = qemu_bh_new(qjack_shutdown_bh, &ji->c);
|
||||
|
||||
int ret = qjack_client_init(&ji->c);
|
||||
if (ret != 0) {
|
||||
qemu_bh_delete(ji->c.shutdown_bh);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -555,7 +566,7 @@ static int qjack_init_in(HWVoiceIn *hw, struct audsettings *as,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void qjack_client_fini(QJackClient *c)
|
||||
static void qjack_client_fini_locked(QJackClient *c)
|
||||
{
|
||||
switch (c->state) {
|
||||
case QJACK_STATE_RUNNING:
|
||||
|
@ -564,28 +575,40 @@ static void qjack_client_fini(QJackClient *c)
|
|||
|
||||
case QJACK_STATE_SHUTDOWN:
|
||||
jack_client_close(c->client);
|
||||
c->client = NULL;
|
||||
|
||||
qjack_buffer_free(&c->fifo);
|
||||
g_free(c->port);
|
||||
|
||||
c->state = QJACK_STATE_DISCONNECTED;
|
||||
/* fallthrough */
|
||||
|
||||
case QJACK_STATE_DISCONNECTED:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
qjack_buffer_free(&c->fifo);
|
||||
g_free(c->port);
|
||||
|
||||
c->state = QJACK_STATE_DISCONNECTED;
|
||||
static void qjack_client_fini(QJackClient *c)
|
||||
{
|
||||
qemu_mutex_lock(&qjack_shutdown_lock);
|
||||
qjack_client_fini_locked(c);
|
||||
qemu_mutex_unlock(&qjack_shutdown_lock);
|
||||
}
|
||||
|
||||
static void qjack_fini_out(HWVoiceOut *hw)
|
||||
{
|
||||
QJackOut *jo = (QJackOut *)hw;
|
||||
qjack_client_fini(&jo->c);
|
||||
|
||||
qemu_bh_delete(jo->c.shutdown_bh);
|
||||
}
|
||||
|
||||
static void qjack_fini_in(HWVoiceIn *hw)
|
||||
{
|
||||
QJackIn *ji = (QJackIn *)hw;
|
||||
qjack_client_fini(&ji->c);
|
||||
|
||||
qemu_bh_delete(ji->c.shutdown_bh);
|
||||
}
|
||||
|
||||
static void qjack_enable_out(HWVoiceOut *hw, bool enable)
|
||||
|
@ -662,6 +685,7 @@ static void qjack_info(const char *msg)
|
|||
|
||||
static void register_audio_jack(void)
|
||||
{
|
||||
qemu_mutex_init(&qjack_shutdown_lock);
|
||||
audio_driver_register(&jack_driver);
|
||||
jack_set_thread_creator(qjack_thread_creator);
|
||||
jack_set_error_function(qjack_error);
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
softmmu_ss.add([spice_headers, files('audio.c')])
|
||||
softmmu_ss.add(files(
|
||||
'audio.c',
|
||||
'audio_legacy.c',
|
||||
'mixeng.c',
|
||||
'noaudio.c',
|
||||
|
|
|
@ -106,7 +106,7 @@ static int line_out_init(HWVoiceOut *hw, struct audsettings *as,
|
|||
out->active = 0;
|
||||
|
||||
out->sin.base.sif = &playback_sif.base;
|
||||
qemu_spice_add_interface (&out->sin.base);
|
||||
qemu_spice.add_interface(&out->sin.base);
|
||||
#if SPICE_INTERFACE_PLAYBACK_MAJOR > 1 || SPICE_INTERFACE_PLAYBACK_MINOR >= 3
|
||||
spice_server_set_playback_rate(&out->sin, settings.freq);
|
||||
#endif
|
||||
|
@ -215,7 +215,7 @@ static int line_in_init(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
|
|||
in->active = 0;
|
||||
|
||||
in->sin.base.sif = &record_sif.base;
|
||||
qemu_spice_add_interface (&in->sin.base);
|
||||
qemu_spice.add_interface(&in->sin.base);
|
||||
#if SPICE_INTERFACE_RECORD_MAJOR > 2 || SPICE_INTERFACE_RECORD_MINOR >= 3
|
||||
spice_server_set_record_rate(&in->sin, settings.freq);
|
||||
#endif
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
|
@ -73,7 +73,8 @@ qauthz_list_file_load(QAuthZListFile *fauthz, Error **errp)
|
|||
|
||||
pdict = qobject_to(QDict, obj);
|
||||
if (!pdict) {
|
||||
error_setg(errp, QERR_INVALID_PARAMETER_TYPE, "obj", "dict");
|
||||
error_setg(errp, "File '%s' must contain a JSON object",
|
||||
fauthz->filename);
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
|
@ -122,7 +123,15 @@ qauthz_list_file_complete(UserCreatable *uc, Error **errp)
|
|||
QAuthZListFile *fauthz = QAUTHZ_LIST_FILE(uc);
|
||||
gchar *dir = NULL, *file = NULL;
|
||||
|
||||
if (!fauthz->filename) {
|
||||
error_setg(errp, "filename not provided");
|
||||
return;
|
||||
}
|
||||
|
||||
fauthz->list = qauthz_list_file_load(fauthz, errp);
|
||||
if (!fauthz->list) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!fauthz->refresh) {
|
||||
return;
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
authz_ss = ss.source_set()
|
||||
authz_ss.add(genh)
|
||||
authz_ss.add(files(
|
||||
'base.c',
|
||||
|
@ -8,12 +7,3 @@ authz_ss.add(files(
|
|||
))
|
||||
|
||||
authz_ss.add(when: ['CONFIG_AUTH_PAM', pam], if_true: files('pamacct.c'))
|
||||
|
||||
authz_ss = authz_ss.apply(config_host, strict: false)
|
||||
libauthz = static_library('authz', authz_ss.sources() + genh,
|
||||
dependencies: [authz_ss.dependencies()],
|
||||
name_suffix: 'fa',
|
||||
build_by_default: false)
|
||||
|
||||
authz = declare_dependency(link_whole: libauthz,
|
||||
dependencies: qom)
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
|
@ -84,6 +84,12 @@ qauthz_pam_prop_get_service(Object *obj,
|
|||
static void
|
||||
qauthz_pam_complete(UserCreatable *uc, Error **errp)
|
||||
{
|
||||
QAuthZPAM *pauthz = QAUTHZ_PAM(uc);
|
||||
|
||||
if (!pauthz->service) {
|
||||
error_setg(errp, "The 'service' property must be set");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
|
@ -65,11 +65,25 @@ qauthz_simple_finalize(Object *obj)
|
|||
}
|
||||
|
||||
|
||||
static void
|
||||
qauthz_simple_complete(UserCreatable *uc, Error **errp)
|
||||
{
|
||||
QAuthZSimple *sauthz = QAUTHZ_SIMPLE(uc);
|
||||
|
||||
if (!sauthz->identity) {
|
||||
error_setg(errp, "The 'identity' property must be set");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
qauthz_simple_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
QAuthZClass *authz = QAUTHZ_CLASS(oc);
|
||||
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
|
||||
|
||||
ucc->complete = qauthz_simple_complete;
|
||||
authz->is_allowed = qauthz_simple_is_allowed;
|
||||
|
||||
object_class_property_add_str(oc, "identity",
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
|
|
|
@ -135,12 +135,6 @@ static char *rng_egd_get_chardev(Object *obj, Error **errp)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static void rng_egd_init(Object *obj)
|
||||
{
|
||||
object_property_add_str(obj, "chardev",
|
||||
rng_egd_get_chardev, rng_egd_set_chardev);
|
||||
}
|
||||
|
||||
static void rng_egd_finalize(Object *obj)
|
||||
{
|
||||
RngEgd *s = RNG_EGD(obj);
|
||||
|
@ -155,6 +149,8 @@ static void rng_egd_class_init(ObjectClass *klass, void *data)
|
|||
|
||||
rbc->request_entropy = rng_egd_request_entropy;
|
||||
rbc->opened = rng_egd_opened;
|
||||
object_class_property_add_str(klass, "chardev",
|
||||
rng_egd_get_chardev, rng_egd_set_chardev);
|
||||
}
|
||||
|
||||
static const TypeInfo rng_egd_info = {
|
||||
|
@ -162,7 +158,6 @@ static const TypeInfo rng_egd_info = {
|
|||
.parent = TYPE_RNG_BACKEND,
|
||||
.instance_size = sizeof(RngEgd),
|
||||
.class_init = rng_egd_class_init,
|
||||
.instance_init = rng_egd_init,
|
||||
.instance_finalize = rng_egd_finalize,
|
||||
};
|
||||
|
||||
|
|
|
@ -108,10 +108,6 @@ static void rng_random_init(Object *obj)
|
|||
{
|
||||
RngRandom *s = RNG_RANDOM(obj);
|
||||
|
||||
object_property_add_str(obj, "filename",
|
||||
rng_random_get_filename,
|
||||
rng_random_set_filename);
|
||||
|
||||
s->filename = g_strdup("/dev/urandom");
|
||||
s->fd = -1;
|
||||
}
|
||||
|
@ -134,6 +130,10 @@ static void rng_random_class_init(ObjectClass *klass, void *data)
|
|||
|
||||
rbc->request_entropy = rng_random_request_entropy;
|
||||
rbc->opened = rng_random_opened;
|
||||
object_class_property_add_str(klass, "filename",
|
||||
rng_random_get_filename,
|
||||
rng_random_set_filename);
|
||||
|
||||
}
|
||||
|
||||
static const TypeInfo rng_random_info = {
|
||||
|
|
|
@ -105,10 +105,6 @@ static void rng_backend_init(Object *obj)
|
|||
RngBackend *s = RNG_BACKEND(obj);
|
||||
|
||||
QSIMPLEQ_INIT(&s->requests);
|
||||
|
||||
object_property_add_bool(obj, "opened",
|
||||
rng_backend_prop_get_opened,
|
||||
rng_backend_prop_set_opened);
|
||||
}
|
||||
|
||||
static void rng_backend_finalize(Object *obj)
|
||||
|
@ -123,6 +119,10 @@ static void rng_backend_class_init(ObjectClass *oc, void *data)
|
|||
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
|
||||
|
||||
ucc->complete = rng_backend_complete;
|
||||
|
||||
object_class_property_add_bool(oc, "opened",
|
||||
rng_backend_prop_get_opened,
|
||||
rng_backend_prop_set_opened);
|
||||
}
|
||||
|
||||
static const TypeInfo rng_backend_info = {
|
||||
|
|
|
@ -14,7 +14,7 @@
|
|||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
|
|
|
@ -12,6 +12,10 @@
|
|||
#include <sys/uio.h>
|
||||
#include <sys/ioctl.h>
|
||||
|
||||
#ifdef HAVE_SYS_IOCCOM_H
|
||||
#include <sys/ioccom.h>
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Every response from a command involving a TPM command execution must hold
|
||||
* the ptm_res as the first element.
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
|
@ -35,8 +35,7 @@
|
|||
static void get_tpm(Object *obj, Visitor *v, const char *name, void *opaque,
|
||||
Error **errp)
|
||||
{
|
||||
DeviceState *dev = DEVICE(obj);
|
||||
TPMBackend **be = qdev_get_prop_ptr(dev, opaque);
|
||||
TPMBackend **be = qdev_get_prop_ptr(obj, opaque);
|
||||
char *p;
|
||||
|
||||
p = g_strdup(*be ? (*be)->id : "");
|
||||
|
@ -49,7 +48,7 @@ static void set_tpm(Object *obj, Visitor *v, const char *name, void *opaque,
|
|||
{
|
||||
DeviceState *dev = DEVICE(obj);
|
||||
Property *prop = opaque;
|
||||
TPMBackend *s, **be = qdev_get_prop_ptr(dev, prop);
|
||||
TPMBackend *s, **be = qdev_get_prop_ptr(obj, prop);
|
||||
char *str;
|
||||
|
||||
if (dev->realized) {
|
||||
|
@ -73,9 +72,8 @@ static void set_tpm(Object *obj, Visitor *v, const char *name, void *opaque,
|
|||
|
||||
static void release_tpm(Object *obj, const char *name, void *opaque)
|
||||
{
|
||||
DeviceState *dev = DEVICE(obj);
|
||||
Property *prop = opaque;
|
||||
TPMBackend **be = qdev_get_prop_ptr(dev, prop);
|
||||
TPMBackend **be = qdev_get_prop_ptr(obj, prop);
|
||||
|
||||
if (*be) {
|
||||
tpm_backend_reset(*be);
|
||||
|
|
|
@ -175,9 +175,9 @@ static char *get_chardev(Object *obj, Error **errp)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static void vhost_user_backend_init(Object *obj)
|
||||
static void vhost_user_backend_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
object_property_add_str(obj, "chardev", get_chardev, set_chardev);
|
||||
object_class_property_add_str(oc, "chardev", get_chardev, set_chardev);
|
||||
}
|
||||
|
||||
static void vhost_user_backend_finalize(Object *obj)
|
||||
|
@ -195,7 +195,7 @@ static const TypeInfo vhost_user_backend_info = {
|
|||
.name = TYPE_VHOST_USER_BACKEND,
|
||||
.parent = TYPE_OBJECT,
|
||||
.instance_size = sizeof(VhostUserBackend),
|
||||
.instance_init = vhost_user_backend_init,
|
||||
.class_init = vhost_user_backend_class_init,
|
||||
.instance_finalize = vhost_user_backend_finalize,
|
||||
};
|
||||
|
||||
|
|
138
block.c
138
block.c
|
@ -26,6 +26,7 @@
|
|||
#include "block/trace.h"
|
||||
#include "block/block_int.h"
|
||||
#include "block/blockjob.h"
|
||||
#include "block/fuse.h"
|
||||
#include "block/nbd.h"
|
||||
#include "block/qdict.h"
|
||||
#include "qemu/error-report.h"
|
||||
|
@ -961,6 +962,11 @@ int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
|
|||
}
|
||||
|
||||
bs->total_sectors = hint;
|
||||
|
||||
if (bs->total_sectors * BDRV_SECTOR_SIZE > BDRV_MAX_LENGTH) {
|
||||
return -EFBIG;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -4458,6 +4464,15 @@ static void bdrv_close(BlockDriverState *bs)
|
|||
}
|
||||
QLIST_INIT(&bs->aio_notifiers);
|
||||
bdrv_drained_end(bs);
|
||||
|
||||
/*
|
||||
* If we're still inside some bdrv_drain_all_begin()/end() sections, end
|
||||
* them now since this BDS won't exist anymore when bdrv_drain_all_end()
|
||||
* gets called.
|
||||
*/
|
||||
if (bs->quiesce_counter) {
|
||||
bdrv_drain_all_end_quiesce(bs);
|
||||
}
|
||||
}
|
||||
|
||||
void bdrv_close_all(void)
|
||||
|
@ -4554,8 +4569,16 @@ static bool should_update_child(BdrvChild *c, BlockDriverState *to)
|
|||
return ret;
|
||||
}
|
||||
|
||||
void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
|
||||
Error **errp)
|
||||
/*
|
||||
* With auto_skip=true bdrv_replace_node_common skips updating from parents
|
||||
* if it creates a parent-child relation loop or if parent is block-job.
|
||||
*
|
||||
* With auto_skip=false the error is returned if from has a parent which should
|
||||
* not be updated.
|
||||
*/
|
||||
static void bdrv_replace_node_common(BlockDriverState *from,
|
||||
BlockDriverState *to,
|
||||
bool auto_skip, Error **errp)
|
||||
{
|
||||
BdrvChild *c, *next;
|
||||
GSList *list = NULL, *p;
|
||||
|
@ -4574,7 +4597,12 @@ void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
|
|||
QLIST_FOREACH_SAFE(c, &from->parents, next_parent, next) {
|
||||
assert(c->bs == from);
|
||||
if (!should_update_child(c, to)) {
|
||||
continue;
|
||||
if (auto_skip) {
|
||||
continue;
|
||||
}
|
||||
error_setg(errp, "Should not change '%s' link to '%s'",
|
||||
c->name, from->node_name);
|
||||
goto out;
|
||||
}
|
||||
if (c->frozen) {
|
||||
error_setg(errp, "Cannot change '%s' link to '%s'",
|
||||
|
@ -4614,6 +4642,12 @@ out:
|
|||
bdrv_unref(from);
|
||||
}
|
||||
|
||||
void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
|
||||
Error **errp)
|
||||
{
|
||||
return bdrv_replace_node_common(from, to, true, errp);
|
||||
}
|
||||
|
||||
/*
|
||||
* Add new bs contents at the top of an image chain while the chain is
|
||||
* live, while keeping required fields on the top layer.
|
||||
|
@ -4882,9 +4916,11 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
|
|||
{
|
||||
BlockDriverState *explicit_top = top;
|
||||
bool update_inherits_from;
|
||||
BdrvChild *c, *next;
|
||||
BdrvChild *c;
|
||||
Error *local_err = NULL;
|
||||
int ret = -EIO;
|
||||
g_autoptr(GSList) updated_children = NULL;
|
||||
GSList *p;
|
||||
|
||||
bdrv_ref(top);
|
||||
bdrv_subtree_drained_begin(top);
|
||||
|
@ -4898,14 +4934,6 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
|
|||
goto exit;
|
||||
}
|
||||
|
||||
/* This function changes all links that point to top and makes
|
||||
* them point to base. Check that none of them is frozen. */
|
||||
QLIST_FOREACH(c, &top->parents, next_parent) {
|
||||
if (c->frozen) {
|
||||
goto exit;
|
||||
}
|
||||
}
|
||||
|
||||
/* If 'base' recursively inherits from 'top' then we should set
|
||||
* base->inherits_from to top->inherits_from after 'top' and all
|
||||
* other intermediate nodes have been dropped.
|
||||
|
@ -4922,36 +4950,36 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
|
|||
backing_file_str = base->filename;
|
||||
}
|
||||
|
||||
QLIST_FOREACH_SAFE(c, &top->parents, next_parent, next) {
|
||||
/* Check whether we are allowed to switch c from top to base */
|
||||
GSList *ignore_children = g_slist_prepend(NULL, c);
|
||||
ret = bdrv_check_update_perm(base, NULL, c->perm, c->shared_perm,
|
||||
ignore_children, NULL, &local_err);
|
||||
g_slist_free(ignore_children);
|
||||
if (ret < 0) {
|
||||
error_report_err(local_err);
|
||||
goto exit;
|
||||
}
|
||||
QLIST_FOREACH(c, &top->parents, next_parent) {
|
||||
updated_children = g_slist_prepend(updated_children, c);
|
||||
}
|
||||
|
||||
bdrv_replace_node_common(top, base, false, &local_err);
|
||||
if (local_err) {
|
||||
error_report_err(local_err);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
for (p = updated_children; p; p = p->next) {
|
||||
c = p->data;
|
||||
|
||||
/* If so, update the backing file path in the image file */
|
||||
if (c->klass->update_filename) {
|
||||
ret = c->klass->update_filename(c, base, backing_file_str,
|
||||
&local_err);
|
||||
if (ret < 0) {
|
||||
bdrv_abort_perm_update(base);
|
||||
/*
|
||||
* TODO: Actually, we want to rollback all previous iterations
|
||||
* of this loop, and (which is almost impossible) previous
|
||||
* bdrv_replace_node()...
|
||||
*
|
||||
* Note, that c->klass->update_filename may lead to permission
|
||||
* update, so it's a bad idea to call it inside permission
|
||||
* update transaction of bdrv_replace_node.
|
||||
*/
|
||||
error_report_err(local_err);
|
||||
goto exit;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Do the actual switch in the in-memory graph.
|
||||
* Completes bdrv_check_update_perm() transaction internally.
|
||||
* c->frozen is false, we have checked that above.
|
||||
*/
|
||||
bdrv_ref(base);
|
||||
bdrv_replace_child(c, base);
|
||||
bdrv_unref(top);
|
||||
}
|
||||
|
||||
if (update_inherits_from) {
|
||||
|
@ -5082,8 +5110,13 @@ int64_t bdrv_getlength(BlockDriverState *bs)
|
|||
{
|
||||
int64_t ret = bdrv_nb_sectors(bs);
|
||||
|
||||
ret = ret > INT64_MAX / BDRV_SECTOR_SIZE ? -EFBIG : ret;
|
||||
return ret < 0 ? ret : ret * BDRV_SECTOR_SIZE;
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
if (ret > INT64_MAX / BDRV_SECTOR_SIZE) {
|
||||
return -EFBIG;
|
||||
}
|
||||
return ret * BDRV_SECTOR_SIZE;
|
||||
}
|
||||
|
||||
/* return 0 as number of sectors if no device present or error */
|
||||
|
@ -5211,7 +5244,7 @@ BlockDriverState *bdrv_find_node(const char *node_name)
|
|||
BlockDeviceInfoList *bdrv_named_nodes_list(bool flat,
|
||||
Error **errp)
|
||||
{
|
||||
BlockDeviceInfoList *list, *entry;
|
||||
BlockDeviceInfoList *list;
|
||||
BlockDriverState *bs;
|
||||
|
||||
list = NULL;
|
||||
|
@ -5221,22 +5254,12 @@ BlockDeviceInfoList *bdrv_named_nodes_list(bool flat,
|
|||
qapi_free_BlockDeviceInfoList(list);
|
||||
return NULL;
|
||||
}
|
||||
entry = g_malloc0(sizeof(*entry));
|
||||
entry->value = info;
|
||||
entry->next = list;
|
||||
list = entry;
|
||||
QAPI_LIST_PREPEND(list, info);
|
||||
}
|
||||
|
||||
return list;
|
||||
}
|
||||
|
||||
#define QAPI_LIST_ADD(list, element) do { \
|
||||
typeof(list) _tmp = g_new(typeof(*(list)), 1); \
|
||||
_tmp->value = (element); \
|
||||
_tmp->next = (list); \
|
||||
(list) = _tmp; \
|
||||
} while (0)
|
||||
|
||||
typedef struct XDbgBlockGraphConstructor {
|
||||
XDbgBlockGraph *graph;
|
||||
GHashTable *graph_nodes;
|
||||
|
@ -5291,7 +5314,7 @@ static void xdbg_graph_add_node(XDbgBlockGraphConstructor *gr, void *node,
|
|||
n->type = type;
|
||||
n->name = g_strdup(name);
|
||||
|
||||
QAPI_LIST_ADD(gr->graph->nodes, n);
|
||||
QAPI_LIST_PREPEND(gr->graph->nodes, n);
|
||||
}
|
||||
|
||||
static void xdbg_graph_add_edge(XDbgBlockGraphConstructor *gr, void *parent,
|
||||
|
@ -5310,14 +5333,14 @@ static void xdbg_graph_add_edge(XDbgBlockGraphConstructor *gr, void *parent,
|
|||
uint64_t flag = bdrv_qapi_perm_to_blk_perm(qapi_perm);
|
||||
|
||||
if (flag & child->perm) {
|
||||
QAPI_LIST_ADD(edge->perm, qapi_perm);
|
||||
QAPI_LIST_PREPEND(edge->perm, qapi_perm);
|
||||
}
|
||||
if (flag & child->shared_perm) {
|
||||
QAPI_LIST_ADD(edge->shared_perm, qapi_perm);
|
||||
QAPI_LIST_PREPEND(edge->shared_perm, qapi_perm);
|
||||
}
|
||||
}
|
||||
|
||||
QAPI_LIST_ADD(gr->graph->edges, edge);
|
||||
QAPI_LIST_PREPEND(gr->graph->edges, edge);
|
||||
}
|
||||
|
||||
|
||||
|
@ -5517,6 +5540,7 @@ void bdrv_get_backing_filename(BlockDriverState *bs,
|
|||
|
||||
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
|
||||
{
|
||||
int ret;
|
||||
BlockDriver *drv = bs->drv;
|
||||
/* if bs->drv == NULL, bs is closed, so there's nothing to do here */
|
||||
if (!drv) {
|
||||
|
@ -5530,7 +5554,16 @@ int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
|
|||
return -ENOTSUP;
|
||||
}
|
||||
memset(bdi, 0, sizeof(*bdi));
|
||||
return drv->bdrv_get_info(bs, bdi);
|
||||
ret = drv->bdrv_get_info(bs, bdi);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (bdi->cluster_size > BDRV_MAX_ALIGNMENT) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs,
|
||||
|
@ -5783,6 +5816,7 @@ int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp)
|
|||
bdrv_get_cumulative_perm(bs, &perm, &shared_perm);
|
||||
ret = bdrv_check_perm(bs, NULL, perm, shared_perm, NULL, NULL, errp);
|
||||
if (ret < 0) {
|
||||
bdrv_abort_perm_update(bs);
|
||||
bs->open_flags |= BDRV_O_INACTIVE;
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -199,29 +199,27 @@ static void block_account_one_io(BlockAcctStats *stats, BlockAcctCookie *cookie,
|
|||
return;
|
||||
}
|
||||
|
||||
qemu_mutex_lock(&stats->lock);
|
||||
WITH_QEMU_LOCK_GUARD(&stats->lock) {
|
||||
if (failed) {
|
||||
stats->failed_ops[cookie->type]++;
|
||||
} else {
|
||||
stats->nr_bytes[cookie->type] += cookie->bytes;
|
||||
stats->nr_ops[cookie->type]++;
|
||||
}
|
||||
|
||||
if (failed) {
|
||||
stats->failed_ops[cookie->type]++;
|
||||
} else {
|
||||
stats->nr_bytes[cookie->type] += cookie->bytes;
|
||||
stats->nr_ops[cookie->type]++;
|
||||
}
|
||||
block_latency_histogram_account(&stats->latency_histogram[cookie->type],
|
||||
latency_ns);
|
||||
|
||||
block_latency_histogram_account(&stats->latency_histogram[cookie->type],
|
||||
latency_ns);
|
||||
if (!failed || stats->account_failed) {
|
||||
stats->total_time_ns[cookie->type] += latency_ns;
|
||||
stats->last_access_time_ns = time_ns;
|
||||
|
||||
if (!failed || stats->account_failed) {
|
||||
stats->total_time_ns[cookie->type] += latency_ns;
|
||||
stats->last_access_time_ns = time_ns;
|
||||
|
||||
QSLIST_FOREACH(s, &stats->intervals, entries) {
|
||||
timed_average_account(&s->latency[cookie->type], latency_ns);
|
||||
QSLIST_FOREACH(s, &stats->intervals, entries) {
|
||||
timed_average_account(&s->latency[cookie->type], latency_ns);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
qemu_mutex_unlock(&stats->lock);
|
||||
|
||||
cookie->type = BLOCK_ACCT_NONE;
|
||||
}
|
||||
|
||||
|
|
|
@ -173,7 +173,7 @@ static int add_rule(void *opaque, QemuOpts *opts, Error **errp)
|
|||
{
|
||||
struct add_rule_data *d = opaque;
|
||||
BDRVBlkdebugState *s = d->s;
|
||||
const char* event_name;
|
||||
const char *event_name;
|
||||
int event;
|
||||
struct BlkdebugRule *rule;
|
||||
int64_t sector;
|
||||
|
@ -215,6 +215,7 @@ static int add_rule(void *opaque, QemuOpts *opts, Error **errp)
|
|||
BLKDEBUG_IO_TYPE__MAX, &local_error);
|
||||
if (local_error) {
|
||||
error_propagate(errp, local_error);
|
||||
g_free(rule);
|
||||
return -1;
|
||||
}
|
||||
if (iotype != BLKDEBUG_IO_TYPE__MAX) {
|
||||
|
|
|
@ -156,7 +156,7 @@ static int coroutine_fn commit_run(Job *job, Error **errp)
|
|||
/* Copy if allocated above the base */
|
||||
ret = bdrv_is_allocated_above(blk_bs(s->top), s->base_overlay, true,
|
||||
offset, COMMIT_BUFFER_SIZE, &n);
|
||||
copy = (ret == 1);
|
||||
copy = (ret > 0);
|
||||
trace_commit_one_iteration(s, offset, n, ret);
|
||||
if (copy) {
|
||||
assert(n < SIZE_MAX);
|
||||
|
|
|
@ -41,21 +41,25 @@ bdrv_pwritev(BdrvChild *child, int64_t offset, unsigned int bytes,
|
|||
int coroutine_fn
|
||||
bdrv_co_common_block_status_above(BlockDriverState *bs,
|
||||
BlockDriverState *base,
|
||||
bool include_base,
|
||||
bool want_zero,
|
||||
int64_t offset,
|
||||
int64_t bytes,
|
||||
int64_t *pnum,
|
||||
int64_t *map,
|
||||
BlockDriverState **file);
|
||||
BlockDriverState **file,
|
||||
int *depth);
|
||||
int generated_co_wrapper
|
||||
bdrv_common_block_status_above(BlockDriverState *bs,
|
||||
BlockDriverState *base,
|
||||
bool include_base,
|
||||
bool want_zero,
|
||||
int64_t offset,
|
||||
int64_t bytes,
|
||||
int64_t *pnum,
|
||||
int64_t *map,
|
||||
BlockDriverState **file);
|
||||
BlockDriverState **file,
|
||||
int *depth);
|
||||
|
||||
int coroutine_fn bdrv_co_readv_vmstate(BlockDriverState *bs,
|
||||
QEMUIOVector *qiov, int64_t pos);
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
|
|
28
block/curl.c
28
block/curl.c
|
@ -564,23 +564,23 @@ static void curl_detach_aio_context(BlockDriverState *bs)
|
|||
BDRVCURLState *s = bs->opaque;
|
||||
int i;
|
||||
|
||||
qemu_mutex_lock(&s->mutex);
|
||||
for (i = 0; i < CURL_NUM_STATES; i++) {
|
||||
if (s->states[i].in_use) {
|
||||
curl_clean_state(&s->states[i]);
|
||||
WITH_QEMU_LOCK_GUARD(&s->mutex) {
|
||||
for (i = 0; i < CURL_NUM_STATES; i++) {
|
||||
if (s->states[i].in_use) {
|
||||
curl_clean_state(&s->states[i]);
|
||||
}
|
||||
if (s->states[i].curl) {
|
||||
curl_easy_cleanup(s->states[i].curl);
|
||||
s->states[i].curl = NULL;
|
||||
}
|
||||
g_free(s->states[i].orig_buf);
|
||||
s->states[i].orig_buf = NULL;
|
||||
}
|
||||
if (s->states[i].curl) {
|
||||
curl_easy_cleanup(s->states[i].curl);
|
||||
s->states[i].curl = NULL;
|
||||
if (s->multi) {
|
||||
curl_multi_cleanup(s->multi);
|
||||
s->multi = NULL;
|
||||
}
|
||||
g_free(s->states[i].orig_buf);
|
||||
s->states[i].orig_buf = NULL;
|
||||
}
|
||||
if (s->multi) {
|
||||
curl_multi_cleanup(s->multi);
|
||||
s->multi = NULL;
|
||||
}
|
||||
qemu_mutex_unlock(&s->mutex);
|
||||
|
||||
timer_del(&s->timer);
|
||||
}
|
||||
|
|
|
@ -22,7 +22,6 @@
|
|||
* THE SOFTWARE.
|
||||
*/
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "dmg.h"
|
||||
#include <lzfse.h>
|
||||
|
||||
|
|
|
@ -559,7 +559,7 @@ static void dmg_refresh_limits(BlockDriverState *bs, Error **errp)
|
|||
bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
|
||||
}
|
||||
|
||||
static inline int is_sector_in_chunk(BDRVDMGState* s,
|
||||
static inline int is_sector_in_chunk(BDRVDMGState *s,
|
||||
uint32_t chunk_num, uint64_t sector_num)
|
||||
{
|
||||
if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
|
||||
|
|
|
@ -15,15 +15,26 @@
|
|||
|
||||
#include "block/block.h"
|
||||
#include "sysemu/block-backend.h"
|
||||
#include "sysemu/iothread.h"
|
||||
#include "block/export.h"
|
||||
#include "block/fuse.h"
|
||||
#include "block/nbd.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qapi/qapi-commands-block-export.h"
|
||||
#include "qapi/qapi-events-block-export.h"
|
||||
#include "qemu/id.h"
|
||||
#ifdef CONFIG_VHOST_USER_BLK_SERVER
|
||||
#include "vhost-user-blk-server.h"
|
||||
#endif
|
||||
|
||||
static const BlockExportDriver *blk_exp_drivers[] = {
|
||||
&blk_exp_nbd,
|
||||
#ifdef CONFIG_VHOST_USER_BLK_SERVER
|
||||
&blk_exp_vhost_user_blk,
|
||||
#endif
|
||||
#ifdef CONFIG_FUSE
|
||||
&blk_exp_fuse,
|
||||
#endif
|
||||
};
|
||||
|
||||
/* Only accessed from the main thread */
|
||||
|
@ -57,10 +68,11 @@ static const BlockExportDriver *blk_exp_find_driver(BlockExportType type)
|
|||
|
||||
BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
|
||||
{
|
||||
bool fixed_iothread = export->has_fixed_iothread && export->fixed_iothread;
|
||||
const BlockExportDriver *drv;
|
||||
BlockExport *exp = NULL;
|
||||
BlockDriverState *bs;
|
||||
BlockBackend *blk;
|
||||
BlockBackend *blk = NULL;
|
||||
AioContext *ctx;
|
||||
uint64_t perm;
|
||||
int ret;
|
||||
|
@ -96,6 +108,28 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
|
|||
ctx = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(ctx);
|
||||
|
||||
if (export->has_iothread) {
|
||||
IOThread *iothread;
|
||||
AioContext *new_ctx;
|
||||
|
||||
iothread = iothread_by_id(export->iothread);
|
||||
if (!iothread) {
|
||||
error_setg(errp, "iothread \"%s\" not found", export->iothread);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
new_ctx = iothread_get_aio_context(iothread);
|
||||
|
||||
ret = bdrv_try_set_aio_context(bs, new_ctx, errp);
|
||||
if (ret == 0) {
|
||||
aio_context_release(ctx);
|
||||
aio_context_acquire(new_ctx);
|
||||
ctx = new_ctx;
|
||||
} else if (fixed_iothread) {
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Block exports are used for non-shared storage migration. Make sure
|
||||
* that BDRV_O_INACTIVE is cleared and the image is ready for write
|
||||
|
@ -110,6 +144,11 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
|
|||
}
|
||||
|
||||
blk = blk_new(ctx, perm, BLK_PERM_ALL);
|
||||
|
||||
if (!fixed_iothread) {
|
||||
blk_set_allow_aio_context_change(blk, true);
|
||||
}
|
||||
|
||||
ret = blk_insert_bs(blk, bs, errp);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
|
|
|
@ -0,0 +1,726 @@
|
|||
/*
|
||||
* Present a block device as a raw image through FUSE
|
||||
*
|
||||
* Copyright (c) 2020 Max Reitz <mreitz@redhat.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; under version 2 or later of the License.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#define FUSE_USE_VERSION 31
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "block/aio.h"
|
||||
#include "block/block.h"
|
||||
#include "block/export.h"
|
||||
#include "block/fuse.h"
|
||||
#include "block/qapi.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qapi/qapi-commands-block.h"
|
||||
#include "sysemu/block-backend.h"
|
||||
|
||||
#include <fuse.h>
|
||||
#include <fuse_lowlevel.h>
|
||||
|
||||
|
||||
/* Prevent overly long bounce buffer allocations */
|
||||
#define FUSE_MAX_BOUNCE_BYTES (MIN(BDRV_REQUEST_MAX_BYTES, 64 * 1024 * 1024))
|
||||
|
||||
|
||||
typedef struct FuseExport {
|
||||
BlockExport common;
|
||||
|
||||
struct fuse_session *fuse_session;
|
||||
struct fuse_buf fuse_buf;
|
||||
bool mounted, fd_handler_set_up;
|
||||
|
||||
char *mountpoint;
|
||||
bool writable;
|
||||
bool growable;
|
||||
} FuseExport;
|
||||
|
||||
static GHashTable *exports;
|
||||
static const struct fuse_lowlevel_ops fuse_ops;
|
||||
|
||||
static void fuse_export_shutdown(BlockExport *exp);
|
||||
static void fuse_export_delete(BlockExport *exp);
|
||||
|
||||
static void init_exports_table(void);
|
||||
|
||||
static int setup_fuse_export(FuseExport *exp, const char *mountpoint,
|
||||
Error **errp);
|
||||
static void read_from_fuse_export(void *opaque);
|
||||
|
||||
static bool is_regular_file(const char *path, Error **errp);
|
||||
|
||||
|
||||
static int fuse_export_create(BlockExport *blk_exp,
|
||||
BlockExportOptions *blk_exp_args,
|
||||
Error **errp)
|
||||
{
|
||||
FuseExport *exp = container_of(blk_exp, FuseExport, common);
|
||||
BlockExportOptionsFuse *args = &blk_exp_args->u.fuse;
|
||||
int ret;
|
||||
|
||||
assert(blk_exp_args->type == BLOCK_EXPORT_TYPE_FUSE);
|
||||
|
||||
/* For growable exports, take the RESIZE permission */
|
||||
if (args->growable) {
|
||||
uint64_t blk_perm, blk_shared_perm;
|
||||
|
||||
blk_get_perm(exp->common.blk, &blk_perm, &blk_shared_perm);
|
||||
|
||||
ret = blk_set_perm(exp->common.blk, blk_perm | BLK_PERM_RESIZE,
|
||||
blk_shared_perm, errp);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
init_exports_table();
|
||||
|
||||
/*
|
||||
* It is important to do this check before calling is_regular_file() --
|
||||
* that function will do a stat(), which we would have to handle if we
|
||||
* already exported something on @mountpoint. But we cannot, because
|
||||
* we are currently caught up here.
|
||||
* (Note that ideally we would want to resolve relative paths here,
|
||||
* but bdrv_make_absolute_filename() might do the wrong thing for
|
||||
* paths that contain colons, and realpath() would resolve symlinks,
|
||||
* which we do not want: The mount point is not going to be the
|
||||
* symlink's destination, but the link itself.)
|
||||
* So this will not catch all potential clashes, but hopefully at
|
||||
* least the most common one of specifying exactly the same path
|
||||
* string twice.
|
||||
*/
|
||||
if (g_hash_table_contains(exports, args->mountpoint)) {
|
||||
error_setg(errp, "There already is a FUSE export on '%s'",
|
||||
args->mountpoint);
|
||||
ret = -EEXIST;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (!is_regular_file(args->mountpoint, errp)) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
exp->mountpoint = g_strdup(args->mountpoint);
|
||||
exp->writable = blk_exp_args->writable;
|
||||
exp->growable = args->growable;
|
||||
|
||||
ret = setup_fuse_export(exp, args->mountpoint, errp);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
fuse_export_delete(blk_exp);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Allocates the global @exports hash table.
|
||||
*/
|
||||
static void init_exports_table(void)
|
||||
{
|
||||
if (exports) {
|
||||
return;
|
||||
}
|
||||
|
||||
exports = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create exp->fuse_session and mount it.
|
||||
*/
|
||||
static int setup_fuse_export(FuseExport *exp, const char *mountpoint,
|
||||
Error **errp)
|
||||
{
|
||||
const char *fuse_argv[4];
|
||||
char *mount_opts;
|
||||
struct fuse_args fuse_args;
|
||||
int ret;
|
||||
|
||||
/* Needs to match what fuse_init() sets. Only max_read must be supplied. */
|
||||
mount_opts = g_strdup_printf("max_read=%zu", FUSE_MAX_BOUNCE_BYTES);
|
||||
|
||||
fuse_argv[0] = ""; /* Dummy program name */
|
||||
fuse_argv[1] = "-o";
|
||||
fuse_argv[2] = mount_opts;
|
||||
fuse_argv[3] = NULL;
|
||||
fuse_args = (struct fuse_args)FUSE_ARGS_INIT(3, (char **)fuse_argv);
|
||||
|
||||
exp->fuse_session = fuse_session_new(&fuse_args, &fuse_ops,
|
||||
sizeof(fuse_ops), exp);
|
||||
g_free(mount_opts);
|
||||
if (!exp->fuse_session) {
|
||||
error_setg(errp, "Failed to set up FUSE session");
|
||||
ret = -EIO;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ret = fuse_session_mount(exp->fuse_session, mountpoint);
|
||||
if (ret < 0) {
|
||||
error_setg(errp, "Failed to mount FUSE session to export");
|
||||
ret = -EIO;
|
||||
goto fail;
|
||||
}
|
||||
exp->mounted = true;
|
||||
|
||||
g_hash_table_insert(exports, g_strdup(mountpoint), NULL);
|
||||
|
||||
aio_set_fd_handler(exp->common.ctx,
|
||||
fuse_session_fd(exp->fuse_session), true,
|
||||
read_from_fuse_export, NULL, NULL, exp);
|
||||
exp->fd_handler_set_up = true;
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
fuse_export_shutdown(&exp->common);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Callback to be invoked when the FUSE session FD can be read from.
|
||||
* (This is basically the FUSE event loop.)
|
||||
*/
|
||||
static void read_from_fuse_export(void *opaque)
|
||||
{
|
||||
FuseExport *exp = opaque;
|
||||
int ret;
|
||||
|
||||
blk_exp_ref(&exp->common);
|
||||
|
||||
do {
|
||||
ret = fuse_session_receive_buf(exp->fuse_session, &exp->fuse_buf);
|
||||
} while (ret == -EINTR);
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
fuse_session_process_buf(exp->fuse_session, &exp->fuse_buf);
|
||||
|
||||
out:
|
||||
blk_exp_unref(&exp->common);
|
||||
}
|
||||
|
||||
static void fuse_export_shutdown(BlockExport *blk_exp)
|
||||
{
|
||||
FuseExport *exp = container_of(blk_exp, FuseExport, common);
|
||||
|
||||
if (exp->fuse_session) {
|
||||
fuse_session_exit(exp->fuse_session);
|
||||
|
||||
if (exp->fd_handler_set_up) {
|
||||
aio_set_fd_handler(exp->common.ctx,
|
||||
fuse_session_fd(exp->fuse_session), true,
|
||||
NULL, NULL, NULL, NULL);
|
||||
exp->fd_handler_set_up = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (exp->mountpoint) {
|
||||
/*
|
||||
* Safe to drop now, because we will not handle any requests
|
||||
* for this export anymore anyway.
|
||||
*/
|
||||
g_hash_table_remove(exports, exp->mountpoint);
|
||||
}
|
||||
}
|
||||
|
||||
static void fuse_export_delete(BlockExport *blk_exp)
|
||||
{
|
||||
FuseExport *exp = container_of(blk_exp, FuseExport, common);
|
||||
|
||||
if (exp->fuse_session) {
|
||||
if (exp->mounted) {
|
||||
fuse_session_unmount(exp->fuse_session);
|
||||
}
|
||||
|
||||
fuse_session_destroy(exp->fuse_session);
|
||||
}
|
||||
|
||||
free(exp->fuse_buf.mem);
|
||||
g_free(exp->mountpoint);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check whether @path points to a regular file. If not, put an
|
||||
* appropriate message into *errp.
|
||||
*/
|
||||
static bool is_regular_file(const char *path, Error **errp)
|
||||
{
|
||||
struct stat statbuf;
|
||||
int ret;
|
||||
|
||||
ret = stat(path, &statbuf);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, errno, "Failed to stat '%s'", path);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!S_ISREG(statbuf.st_mode)) {
|
||||
error_setg(errp, "'%s' is not a regular file", path);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* A chance to set change some parameters supplied to FUSE_INIT.
|
||||
*/
|
||||
static void fuse_init(void *userdata, struct fuse_conn_info *conn)
|
||||
{
|
||||
/*
|
||||
* MIN_NON_ZERO() would not be wrong here, but what we set here
|
||||
* must equal what has been passed to fuse_session_new().
|
||||
* Therefore, as long as max_read must be passed as a mount option
|
||||
* (which libfuse claims will be changed at some point), we have
|
||||
* to set max_read to a fixed value here.
|
||||
*/
|
||||
conn->max_read = FUSE_MAX_BOUNCE_BYTES;
|
||||
|
||||
conn->max_write = MIN_NON_ZERO(BDRV_REQUEST_MAX_BYTES, conn->max_write);
|
||||
}
|
||||
|
||||
/**
|
||||
* Let clients look up files. Always return ENOENT because we only
|
||||
* care about the mountpoint itself.
|
||||
*/
|
||||
static void fuse_lookup(fuse_req_t req, fuse_ino_t parent, const char *name)
|
||||
{
|
||||
fuse_reply_err(req, ENOENT);
|
||||
}
|
||||
|
||||
/**
|
||||
* Let clients get file attributes (i.e., stat() the file).
|
||||
*/
|
||||
static void fuse_getattr(fuse_req_t req, fuse_ino_t inode,
|
||||
struct fuse_file_info *fi)
|
||||
{
|
||||
struct stat statbuf;
|
||||
int64_t length, allocated_blocks;
|
||||
time_t now = time(NULL);
|
||||
FuseExport *exp = fuse_req_userdata(req);
|
||||
mode_t mode;
|
||||
|
||||
length = blk_getlength(exp->common.blk);
|
||||
if (length < 0) {
|
||||
fuse_reply_err(req, -length);
|
||||
return;
|
||||
}
|
||||
|
||||
allocated_blocks = bdrv_get_allocated_file_size(blk_bs(exp->common.blk));
|
||||
if (allocated_blocks <= 0) {
|
||||
allocated_blocks = DIV_ROUND_UP(length, 512);
|
||||
} else {
|
||||
allocated_blocks = DIV_ROUND_UP(allocated_blocks, 512);
|
||||
}
|
||||
|
||||
mode = S_IFREG | S_IRUSR;
|
||||
if (exp->writable) {
|
||||
mode |= S_IWUSR;
|
||||
}
|
||||
|
||||
statbuf = (struct stat) {
|
||||
.st_ino = inode,
|
||||
.st_mode = mode,
|
||||
.st_nlink = 1,
|
||||
.st_uid = getuid(),
|
||||
.st_gid = getgid(),
|
||||
.st_size = length,
|
||||
.st_blksize = blk_bs(exp->common.blk)->bl.request_alignment,
|
||||
.st_blocks = allocated_blocks,
|
||||
.st_atime = now,
|
||||
.st_mtime = now,
|
||||
.st_ctime = now,
|
||||
};
|
||||
|
||||
fuse_reply_attr(req, &statbuf, 1.);
|
||||
}
|
||||
|
||||
static int fuse_do_truncate(const FuseExport *exp, int64_t size,
|
||||
bool req_zero_write, PreallocMode prealloc)
|
||||
{
|
||||
uint64_t blk_perm, blk_shared_perm;
|
||||
BdrvRequestFlags truncate_flags = 0;
|
||||
int ret;
|
||||
|
||||
if (req_zero_write) {
|
||||
truncate_flags |= BDRV_REQ_ZERO_WRITE;
|
||||
}
|
||||
|
||||
/* Growable exports have a permanent RESIZE permission */
|
||||
if (!exp->growable) {
|
||||
blk_get_perm(exp->common.blk, &blk_perm, &blk_shared_perm);
|
||||
|
||||
ret = blk_set_perm(exp->common.blk, blk_perm | BLK_PERM_RESIZE,
|
||||
blk_shared_perm, NULL);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
ret = blk_truncate(exp->common.blk, size, true, prealloc,
|
||||
truncate_flags, NULL);
|
||||
|
||||
if (!exp->growable) {
|
||||
/* Must succeed, because we are only giving up the RESIZE permission */
|
||||
blk_set_perm(exp->common.blk, blk_perm, blk_shared_perm, &error_abort);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Let clients set file attributes. Only resizing is supported.
|
||||
*/
|
||||
static void fuse_setattr(fuse_req_t req, fuse_ino_t inode, struct stat *statbuf,
|
||||
int to_set, struct fuse_file_info *fi)
|
||||
{
|
||||
FuseExport *exp = fuse_req_userdata(req);
|
||||
int ret;
|
||||
|
||||
if (!exp->writable) {
|
||||
fuse_reply_err(req, EACCES);
|
||||
return;
|
||||
}
|
||||
|
||||
if (to_set & ~FUSE_SET_ATTR_SIZE) {
|
||||
fuse_reply_err(req, ENOTSUP);
|
||||
return;
|
||||
}
|
||||
|
||||
ret = fuse_do_truncate(exp, statbuf->st_size, true, PREALLOC_MODE_OFF);
|
||||
if (ret < 0) {
|
||||
fuse_reply_err(req, -ret);
|
||||
return;
|
||||
}
|
||||
|
||||
fuse_getattr(req, inode, fi);
|
||||
}
|
||||
|
||||
/**
|
||||
* Let clients open a file (i.e., the exported image).
|
||||
*/
|
||||
static void fuse_open(fuse_req_t req, fuse_ino_t inode,
|
||||
struct fuse_file_info *fi)
|
||||
{
|
||||
fuse_reply_open(req, fi);
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle client reads from the exported image.
|
||||
*/
|
||||
static void fuse_read(fuse_req_t req, fuse_ino_t inode,
|
||||
size_t size, off_t offset, struct fuse_file_info *fi)
|
||||
{
|
||||
FuseExport *exp = fuse_req_userdata(req);
|
||||
int64_t length;
|
||||
void *buf;
|
||||
int ret;
|
||||
|
||||
/* Limited by max_read, should not happen */
|
||||
if (size > FUSE_MAX_BOUNCE_BYTES) {
|
||||
fuse_reply_err(req, EINVAL);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clients will expect short reads at EOF, so we have to limit
|
||||
* offset+size to the image length.
|
||||
*/
|
||||
length = blk_getlength(exp->common.blk);
|
||||
if (length < 0) {
|
||||
fuse_reply_err(req, -length);
|
||||
return;
|
||||
}
|
||||
|
||||
if (offset + size > length) {
|
||||
size = length - offset;
|
||||
}
|
||||
|
||||
buf = qemu_try_blockalign(blk_bs(exp->common.blk), size);
|
||||
if (!buf) {
|
||||
fuse_reply_err(req, ENOMEM);
|
||||
return;
|
||||
}
|
||||
|
||||
ret = blk_pread(exp->common.blk, offset, buf, size);
|
||||
if (ret >= 0) {
|
||||
fuse_reply_buf(req, buf, size);
|
||||
} else {
|
||||
fuse_reply_err(req, -ret);
|
||||
}
|
||||
|
||||
qemu_vfree(buf);
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle client writes to the exported image.
|
||||
*/
|
||||
static void fuse_write(fuse_req_t req, fuse_ino_t inode, const char *buf,
|
||||
size_t size, off_t offset, struct fuse_file_info *fi)
|
||||
{
|
||||
FuseExport *exp = fuse_req_userdata(req);
|
||||
int64_t length;
|
||||
int ret;
|
||||
|
||||
/* Limited by max_write, should not happen */
|
||||
if (size > BDRV_REQUEST_MAX_BYTES) {
|
||||
fuse_reply_err(req, EINVAL);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!exp->writable) {
|
||||
fuse_reply_err(req, EACCES);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clients will expect short writes at EOF, so we have to limit
|
||||
* offset+size to the image length.
|
||||
*/
|
||||
length = blk_getlength(exp->common.blk);
|
||||
if (length < 0) {
|
||||
fuse_reply_err(req, -length);
|
||||
return;
|
||||
}
|
||||
|
||||
if (offset + size > length) {
|
||||
if (exp->growable) {
|
||||
ret = fuse_do_truncate(exp, offset + size, true, PREALLOC_MODE_OFF);
|
||||
if (ret < 0) {
|
||||
fuse_reply_err(req, -ret);
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
size = length - offset;
|
||||
}
|
||||
}
|
||||
|
||||
ret = blk_pwrite(exp->common.blk, offset, buf, size, 0);
|
||||
if (ret >= 0) {
|
||||
fuse_reply_write(req, size);
|
||||
} else {
|
||||
fuse_reply_err(req, -ret);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Let clients perform various fallocate() operations.
|
||||
*/
|
||||
static void fuse_fallocate(fuse_req_t req, fuse_ino_t inode, int mode,
|
||||
off_t offset, off_t length,
|
||||
struct fuse_file_info *fi)
|
||||
{
|
||||
FuseExport *exp = fuse_req_userdata(req);
|
||||
int64_t blk_len;
|
||||
int ret;
|
||||
|
||||
if (!exp->writable) {
|
||||
fuse_reply_err(req, EACCES);
|
||||
return;
|
||||
}
|
||||
|
||||
blk_len = blk_getlength(exp->common.blk);
|
||||
if (blk_len < 0) {
|
||||
fuse_reply_err(req, -blk_len);
|
||||
return;
|
||||
}
|
||||
|
||||
if (mode & FALLOC_FL_KEEP_SIZE) {
|
||||
length = MIN(length, blk_len - offset);
|
||||
}
|
||||
|
||||
if (mode & FALLOC_FL_PUNCH_HOLE) {
|
||||
if (!(mode & FALLOC_FL_KEEP_SIZE)) {
|
||||
fuse_reply_err(req, EINVAL);
|
||||
return;
|
||||
}
|
||||
|
||||
do {
|
||||
int size = MIN(length, BDRV_REQUEST_MAX_BYTES);
|
||||
|
||||
ret = blk_pdiscard(exp->common.blk, offset, size);
|
||||
offset += size;
|
||||
length -= size;
|
||||
} while (ret == 0 && length > 0);
|
||||
} else if (mode & FALLOC_FL_ZERO_RANGE) {
|
||||
if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + length > blk_len) {
|
||||
/* No need for zeroes, we are going to write them ourselves */
|
||||
ret = fuse_do_truncate(exp, offset + length, false,
|
||||
PREALLOC_MODE_OFF);
|
||||
if (ret < 0) {
|
||||
fuse_reply_err(req, -ret);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
do {
|
||||
int size = MIN(length, BDRV_REQUEST_MAX_BYTES);
|
||||
|
||||
ret = blk_pwrite_zeroes(exp->common.blk,
|
||||
offset, size, 0);
|
||||
offset += size;
|
||||
length -= size;
|
||||
} while (ret == 0 && length > 0);
|
||||
} else if (!mode) {
|
||||
/* We can only fallocate at the EOF with a truncate */
|
||||
if (offset < blk_len) {
|
||||
fuse_reply_err(req, EOPNOTSUPP);
|
||||
return;
|
||||
}
|
||||
|
||||
if (offset > blk_len) {
|
||||
/* No preallocation needed here */
|
||||
ret = fuse_do_truncate(exp, offset, true, PREALLOC_MODE_OFF);
|
||||
if (ret < 0) {
|
||||
fuse_reply_err(req, -ret);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
ret = fuse_do_truncate(exp, offset + length, true,
|
||||
PREALLOC_MODE_FALLOC);
|
||||
} else {
|
||||
ret = -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
fuse_reply_err(req, ret < 0 ? -ret : 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Let clients fsync the exported image.
|
||||
*/
|
||||
static void fuse_fsync(fuse_req_t req, fuse_ino_t inode, int datasync,
|
||||
struct fuse_file_info *fi)
|
||||
{
|
||||
FuseExport *exp = fuse_req_userdata(req);
|
||||
int ret;
|
||||
|
||||
ret = blk_flush(exp->common.blk);
|
||||
fuse_reply_err(req, ret < 0 ? -ret : 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Called before an FD to the exported image is closed. (libfuse
|
||||
* notes this to be a way to return last-minute errors.)
|
||||
*/
|
||||
static void fuse_flush(fuse_req_t req, fuse_ino_t inode,
|
||||
struct fuse_file_info *fi)
|
||||
{
|
||||
fuse_fsync(req, inode, 1, fi);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FUSE_LSEEK
|
||||
/**
|
||||
* Let clients inquire allocation status.
|
||||
*/
|
||||
static void fuse_lseek(fuse_req_t req, fuse_ino_t inode, off_t offset,
|
||||
int whence, struct fuse_file_info *fi)
|
||||
{
|
||||
FuseExport *exp = fuse_req_userdata(req);
|
||||
|
||||
if (whence != SEEK_HOLE && whence != SEEK_DATA) {
|
||||
fuse_reply_err(req, EINVAL);
|
||||
return;
|
||||
}
|
||||
|
||||
while (true) {
|
||||
int64_t pnum;
|
||||
int ret;
|
||||
|
||||
ret = bdrv_block_status_above(blk_bs(exp->common.blk), NULL,
|
||||
offset, INT64_MAX, &pnum, NULL, NULL);
|
||||
if (ret < 0) {
|
||||
fuse_reply_err(req, -ret);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!pnum && (ret & BDRV_BLOCK_EOF)) {
|
||||
int64_t blk_len;
|
||||
|
||||
/*
|
||||
* If blk_getlength() rounds (e.g. by sectors), then the
|
||||
* export length will be rounded, too. However,
|
||||
* bdrv_block_status_above() may return EOF at unaligned
|
||||
* offsets. We must not let this become visible and thus
|
||||
* always simulate a hole between @offset (the real EOF)
|
||||
* and @blk_len (the client-visible EOF).
|
||||
*/
|
||||
|
||||
blk_len = blk_getlength(exp->common.blk);
|
||||
if (blk_len < 0) {
|
||||
fuse_reply_err(req, -blk_len);
|
||||
return;
|
||||
}
|
||||
|
||||
if (offset > blk_len || whence == SEEK_DATA) {
|
||||
fuse_reply_err(req, ENXIO);
|
||||
} else {
|
||||
fuse_reply_lseek(req, offset);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (ret & BDRV_BLOCK_DATA) {
|
||||
if (whence == SEEK_DATA) {
|
||||
fuse_reply_lseek(req, offset);
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
if (whence == SEEK_HOLE) {
|
||||
fuse_reply_lseek(req, offset);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* Safety check against infinite loops */
|
||||
if (!pnum) {
|
||||
fuse_reply_err(req, ENXIO);
|
||||
return;
|
||||
}
|
||||
|
||||
offset += pnum;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static const struct fuse_lowlevel_ops fuse_ops = {
|
||||
.init = fuse_init,
|
||||
.lookup = fuse_lookup,
|
||||
.getattr = fuse_getattr,
|
||||
.setattr = fuse_setattr,
|
||||
.open = fuse_open,
|
||||
.read = fuse_read,
|
||||
.write = fuse_write,
|
||||
.fallocate = fuse_fallocate,
|
||||
.flush = fuse_flush,
|
||||
.fsync = fuse_fsync,
|
||||
#ifdef CONFIG_FUSE_LSEEK
|
||||
.lseek = fuse_lseek,
|
||||
#endif
|
||||
};
|
||||
|
||||
const BlockExportDriver blk_exp_fuse = {
|
||||
.type = BLOCK_EXPORT_TYPE_FUSE,
|
||||
.instance_size = sizeof(FuseExport),
|
||||
.create = fuse_export_create,
|
||||
.delete = fuse_export_delete,
|
||||
.request_shutdown = fuse_export_shutdown,
|
||||
};
|
|
@ -1 +1,7 @@
|
|||
block_ss.add(files('export.c'))
|
||||
blockdev_ss.add(files('export.c'))
|
||||
|
||||
if have_vhost_user_blk_server
|
||||
blockdev_ss.add(files('vhost-user-blk-server.c'))
|
||||
endif
|
||||
|
||||
blockdev_ss.add(when: fuse, if_true: files('fuse.c'))
|
||||
|
|
|
@ -0,0 +1,435 @@
|
|||
/*
|
||||
 * Sharing QEMU block devices via vhost-user protocol
|
||||
*
|
||||
* Parts of the code based on nbd/server.c.
|
||||
*
|
||||
* Copyright (c) Coiby Xu <coiby.xu@gmail.com>.
|
||||
* Copyright (c) 2020 Red Hat, Inc.
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or
|
||||
* later. See the COPYING file in the top-level directory.
|
||||
*/
|
||||
#include "qemu/osdep.h"
|
||||
#include "block/block.h"
|
||||
#include "subprojects/libvhost-user/libvhost-user.h" /* only for the type definitions */
|
||||
#include "standard-headers/linux/virtio_blk.h"
|
||||
#include "qemu/vhost-user-server.h"
|
||||
#include "vhost-user-blk-server.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qom/object_interfaces.h"
|
||||
#include "sysemu/block-backend.h"
|
||||
#include "util/block-helpers.h"
|
||||
|
||||
enum {
    /* Number of virtqueues used when the user does not specify num-queues */
    VHOST_USER_BLK_NUM_QUEUES_DEFAULT = 1,
};

/* Trailing status byte of every virtio-blk request (written by the device) */
struct virtio_blk_inhdr {
    unsigned char status;
};
|
||||
|
||||
typedef struct VuBlkReq {
|
||||
VuVirtqElement elem;
|
||||
int64_t sector_num;
|
||||
size_t size;
|
||||
struct virtio_blk_inhdr *in;
|
||||
struct virtio_blk_outhdr out;
|
||||
VuServer *server;
|
||||
struct VuVirtq *vq;
|
||||
} VuBlkReq;
|
||||
|
||||
/* vhost user block device */
|
||||
typedef struct {
|
||||
BlockExport export;
|
||||
VuServer vu_server;
|
||||
uint32_t blk_size;
|
||||
QIOChannelSocket *sioc;
|
||||
struct virtio_blk_config blkcfg;
|
||||
bool writable;
|
||||
} VuBlkExport;
|
||||
|
||||
/*
 * Push the finished request back onto its virtqueue, notify the client,
 * and release the request memory (allocated by vu_queue_pop()).
 */
static void vu_blk_req_complete(VuBlkReq *req)
{
    VuDev *dev = &req->server->vu_dev;

    /* Written length is the payload plus the one status byte. */
    vu_queue_push(dev, req->vq, &req->elem, req->size + 1);
    vu_queue_notify(dev, req->vq);

    free(req);
}
|
||||
|
||||
static int coroutine_fn
|
||||
vu_blk_discard_write_zeroes(BlockBackend *blk, struct iovec *iov,
|
||||
uint32_t iovcnt, uint32_t type)
|
||||
{
|
||||
struct virtio_blk_discard_write_zeroes desc;
|
||||
ssize_t size = iov_to_buf(iov, iovcnt, 0, &desc, sizeof(desc));
|
||||
if (unlikely(size != sizeof(desc))) {
|
||||
error_report("Invalid size %zd, expect %zu", size, sizeof(desc));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
uint64_t range[2] = { le64_to_cpu(desc.sector) << 9,
|
||||
le32_to_cpu(desc.num_sectors) << 9 };
|
||||
if (type == VIRTIO_BLK_T_DISCARD) {
|
||||
if (blk_co_pdiscard(blk, range[0], range[1]) == 0) {
|
||||
return 0;
|
||||
}
|
||||
} else if (type == VIRTIO_BLK_T_WRITE_ZEROES) {
|
||||
if (blk_co_pwrite_zeroes(blk, range[0], range[1], 0) == 0) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
 * Coroutine entry point: parse and execute one virtio-blk request.
 *
 * Layout handling follows hw/block/virtio-blk.c: the first out iovec
 * bytes carry the virtio_blk_outhdr, the last in iovec bytes carry the
 * one-byte virtio_blk_inhdr status.  On a malformed request the element
 * is dropped without being pushed back (goto err).
 */
static void coroutine_fn vu_blk_virtio_process_req(void *opaque)
{
    VuBlkReq *req = opaque;
    VuServer *server = req->server;
    VuVirtqElement *elem = &req->elem;
    uint32_t type;

    VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server);
    BlockBackend *blk = vexp->export.blk;

    struct iovec *in_iov = elem->in_sg;
    struct iovec *out_iov = elem->out_sg;
    unsigned in_num = elem->in_num;
    unsigned out_num = elem->out_num;

    /* A valid request has at least an outhdr and an inhdr. */
    if (elem->out_num < 1 || elem->in_num < 1) {
        error_report("virtio-blk request missing headers");
        goto err;
    }

    /* Copy out the request header ... */
    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out,
                            sizeof(req->out)) != sizeof(req->out))) {
        error_report("virtio-blk request outhdr too short");
        goto err;
    }

    /* ... and strip it so out_iov covers only the data payload. */
    iov_discard_front(&out_iov, &out_num, sizeof(req->out));

    if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
        error_report("virtio-blk request inhdr too short");
        goto err;
    }

    /* We always touch the last byte, so just see how big in_iov is. */
    req->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_blk_inhdr);
    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr));

    type = le32_to_cpu(req->out.type);
    switch (type & ~VIRTIO_BLK_T_BARRIER) {
    case VIRTIO_BLK_T_IN:
    case VIRTIO_BLK_T_OUT: {
        ssize_t ret = 0;
        bool is_write = type & VIRTIO_BLK_T_OUT;
        req->sector_num = le64_to_cpu(req->out.sector);

        /* Reject writes on a read-only export. */
        if (is_write && !vexp->writable) {
            req->in->status = VIRTIO_BLK_S_IOERR;
            break;
        }

        int64_t offset = req->sector_num * vexp->blk_size;
        QEMUIOVector qiov;
        if (is_write) {
            qemu_iovec_init_external(&qiov, out_iov, out_num);
            ret = blk_co_pwritev(blk, offset, qiov.size, &qiov, 0);
        } else {
            qemu_iovec_init_external(&qiov, in_iov, in_num);
            ret = blk_co_preadv(blk, offset, qiov.size, &qiov, 0);
        }
        if (ret >= 0) {
            req->in->status = VIRTIO_BLK_S_OK;
        } else {
            req->in->status = VIRTIO_BLK_S_IOERR;
        }
        break;
    }
    case VIRTIO_BLK_T_FLUSH:
        if (blk_co_flush(blk) == 0) {
            req->in->status = VIRTIO_BLK_S_OK;
        } else {
            req->in->status = VIRTIO_BLK_S_IOERR;
        }
        break;
    case VIRTIO_BLK_T_GET_ID: {
        /* Serial string, truncated to what fits in the first in iovec. */
        size_t size = MIN(iov_size(&elem->in_sg[0], in_num),
                          VIRTIO_BLK_ID_BYTES);
        snprintf(elem->in_sg[0].iov_base, size, "%s", "vhost_user_blk");
        req->in->status = VIRTIO_BLK_S_OK;
        req->size = elem->in_sg[0].iov_len;
        break;
    }
    case VIRTIO_BLK_T_DISCARD:
    case VIRTIO_BLK_T_WRITE_ZEROES: {
        int rc;

        /* Both operations modify the image; reject on read-only exports. */
        if (!vexp->writable) {
            req->in->status = VIRTIO_BLK_S_IOERR;
            break;
        }

        rc = vu_blk_discard_write_zeroes(blk, &elem->out_sg[1], out_num, type);
        if (rc == 0) {
            req->in->status = VIRTIO_BLK_S_OK;
        } else {
            req->in->status = VIRTIO_BLK_S_IOERR;
        }
        break;
    }
    default:
        req->in->status = VIRTIO_BLK_S_UNSUPP;
        break;
    }

    vu_blk_req_complete(req);
    return;

err:
    free(req);
}
|
||||
|
||||
/*
 * Virtqueue kick handler: drain every available element from queue @idx
 * and hand each one to a fresh request coroutine.
 */
static void vu_blk_process_vq(VuDev *vu_dev, int idx)
{
    VuServer *server = container_of(vu_dev, VuServer, vu_dev);
    VuVirtq *vq = vu_get_queue(vu_dev, idx);

    for (;;) {
        VuBlkReq *req = vu_queue_pop(vu_dev, vq, sizeof(VuBlkReq));
        if (!req) {
            break;      /* queue exhausted */
        }

        req->server = server;
        req->vq = vq;

        Coroutine *co = qemu_coroutine_create(vu_blk_virtio_process_req, req);
        qemu_coroutine_enter(co);
    }
}
|
||||
|
||||
/* Install or remove the kick handler as the client starts/stops a queue. */
static void vu_blk_queue_set_started(VuDev *vu_dev, int idx, bool started)
{
    assert(vu_dev);

    VuVirtq *vq = vu_get_queue(vu_dev, idx);
    vu_set_queue_handler(vu_dev, vq, started ? vu_blk_process_vq : NULL);
}
|
||||
|
||||
/* Return the virtio feature bits this export offers to the client. */
static uint64_t vu_blk_get_features(VuDev *dev)
{
    VuServer *server = container_of(dev, VuServer, vu_dev);
    VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server);
    uint64_t features = 0;

    /* virtio-blk device features */
    features |= 1ull << VIRTIO_BLK_F_SIZE_MAX;
    features |= 1ull << VIRTIO_BLK_F_SEG_MAX;
    features |= 1ull << VIRTIO_BLK_F_TOPOLOGY;
    features |= 1ull << VIRTIO_BLK_F_BLK_SIZE;
    features |= 1ull << VIRTIO_BLK_F_FLUSH;
    features |= 1ull << VIRTIO_BLK_F_DISCARD;
    features |= 1ull << VIRTIO_BLK_F_WRITE_ZEROES;
    features |= 1ull << VIRTIO_BLK_F_CONFIG_WCE;
    features |= 1ull << VIRTIO_BLK_F_MQ;

    /* transport and vhost-user features */
    features |= 1ull << VIRTIO_F_VERSION_1;
    features |= 1ull << VIRTIO_RING_F_INDIRECT_DESC;
    features |= 1ull << VIRTIO_RING_F_EVENT_IDX;
    features |= 1ull << VHOST_USER_F_PROTOCOL_FEATURES;

    /* A read-only export additionally advertises VIRTIO_BLK_F_RO. */
    if (!vexp->writable) {
        features |= 1ull << VIRTIO_BLK_F_RO;
    }

    return features;
}
|
||||
|
||||
/* vhost-user protocol features: config-space access and inflight tracking. */
static uint64_t vu_blk_get_protocol_features(VuDev *dev)
{
    uint64_t protocol_features = 1ull << VHOST_USER_PROTOCOL_F_CONFIG;

    protocol_features |= 1ull << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD;
    return protocol_features;
}
|
||||
|
||||
static int
|
||||
vu_blk_get_config(VuDev *vu_dev, uint8_t *config, uint32_t len)
|
||||
{
|
||||
VuServer *server = container_of(vu_dev, VuServer, vu_dev);
|
||||
VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server);
|
||||
|
||||
if (len > sizeof(struct virtio_blk_config)) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
memcpy(config, &vexp->blkcfg, len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
vu_blk_set_config(VuDev *vu_dev, const uint8_t *data,
|
||||
uint32_t offset, uint32_t size, uint32_t flags)
|
||||
{
|
||||
VuServer *server = container_of(vu_dev, VuServer, vu_dev);
|
||||
VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server);
|
||||
uint8_t wce;
|
||||
|
||||
/* don't support live migration */
|
||||
if (flags != VHOST_SET_CONFIG_TYPE_MASTER) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (offset != offsetof(struct virtio_blk_config, wce) ||
|
||||
size != 1) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
wce = *data;
|
||||
vexp->blkcfg.wce = wce;
|
||||
blk_set_enable_write_cache(vexp->export.blk, wce);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* When the client disconnects, it sends a VHOST_USER_NONE request
|
||||
* and vu_process_message will simple call exit which cause the VM
|
||||
* to exit abruptly.
|
||||
* To avoid this issue, process VHOST_USER_NONE request ahead
|
||||
* of vu_process_message.
|
||||
*
|
||||
*/
|
||||
static int vu_blk_process_msg(VuDev *dev, VhostUserMsg *vmsg, int *do_reply)
|
||||
{
|
||||
if (vmsg->request == VHOST_USER_NONE) {
|
||||
dev->panic(dev, "disconnect");
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static const VuDevIface vu_blk_iface = {
|
||||
.get_features = vu_blk_get_features,
|
||||
.queue_set_started = vu_blk_queue_set_started,
|
||||
.get_protocol_features = vu_blk_get_protocol_features,
|
||||
.get_config = vu_blk_get_config,
|
||||
.set_config = vu_blk_set_config,
|
||||
.process_msg = vu_blk_process_msg,
|
||||
};
|
||||
|
||||
/* AioContext notifier: follow the block backend into its new context. */
static void blk_aio_attached(AioContext *ctx, void *opaque)
{
    VuBlkExport *vexp = opaque;

    vexp->export.ctx = ctx;
    vhost_user_server_attach_aio_context(&vexp->vu_server, ctx);
}
|
||||
|
||||
static void blk_aio_detach(void *opaque)
|
||||
{
|
||||
VuBlkExport *vexp = opaque;
|
||||
|
||||
vhost_user_server_detach_aio_context(&vexp->vu_server);
|
||||
vexp->export.ctx = NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
vu_blk_initialize_config(BlockDriverState *bs,
|
||||
struct virtio_blk_config *config,
|
||||
uint32_t blk_size,
|
||||
uint16_t num_queues)
|
||||
{
|
||||
config->capacity = cpu_to_le64(bdrv_getlength(bs) >> BDRV_SECTOR_BITS);
|
||||
config->blk_size = cpu_to_le32(blk_size);
|
||||
config->size_max = cpu_to_le32(0);
|
||||
config->seg_max = cpu_to_le32(128 - 2);
|
||||
config->min_io_size = cpu_to_le16(1);
|
||||
config->opt_io_size = cpu_to_le32(1);
|
||||
config->num_queues = cpu_to_le16(num_queues);
|
||||
config->max_discard_sectors = cpu_to_le32(32768);
|
||||
config->max_discard_seg = cpu_to_le32(1);
|
||||
config->discard_sector_alignment = cpu_to_le32(config->blk_size >> 9);
|
||||
config->max_write_zeroes_sectors = cpu_to_le32(32768);
|
||||
config->max_write_zeroes_seg = cpu_to_le32(1);
|
||||
}
|
||||
|
||||
/* BlockExportDriver.request_shutdown: stop accepting vhost-user traffic. */
static void vu_blk_exp_request_shutdown(BlockExport *exp)
{
    VuBlkExport *vexp = container_of(exp, VuBlkExport, export);

    vhost_user_server_stop(&vexp->vu_server);
}
|
||||
|
||||
static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts,
|
||||
Error **errp)
|
||||
{
|
||||
VuBlkExport *vexp = container_of(exp, VuBlkExport, export);
|
||||
BlockExportOptionsVhostUserBlk *vu_opts = &opts->u.vhost_user_blk;
|
||||
Error *local_err = NULL;
|
||||
uint64_t logical_block_size;
|
||||
uint16_t num_queues = VHOST_USER_BLK_NUM_QUEUES_DEFAULT;
|
||||
|
||||
vexp->writable = opts->writable;
|
||||
vexp->blkcfg.wce = 0;
|
||||
|
||||
if (vu_opts->has_logical_block_size) {
|
||||
logical_block_size = vu_opts->logical_block_size;
|
||||
} else {
|
||||
logical_block_size = BDRV_SECTOR_SIZE;
|
||||
}
|
||||
check_block_size(exp->id, "logical-block-size", logical_block_size,
|
||||
&local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return -EINVAL;
|
||||
}
|
||||
vexp->blk_size = logical_block_size;
|
||||
blk_set_guest_block_size(exp->blk, logical_block_size);
|
||||
|
||||
if (vu_opts->has_num_queues) {
|
||||
num_queues = vu_opts->num_queues;
|
||||
}
|
||||
if (num_queues == 0) {
|
||||
error_setg(errp, "num-queues must be greater than 0");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
vu_blk_initialize_config(blk_bs(exp->blk), &vexp->blkcfg,
|
||||
logical_block_size, num_queues);
|
||||
|
||||
blk_add_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach,
|
||||
vexp);
|
||||
|
||||
if (!vhost_user_server_start(&vexp->vu_server, vu_opts->addr, exp->ctx,
|
||||
num_queues, &vu_blk_iface, errp)) {
|
||||
blk_remove_aio_context_notifier(exp->blk, blk_aio_attached,
|
||||
blk_aio_detach, vexp);
|
||||
return -EADDRNOTAVAIL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* BlockExportDriver.delete: undo what vu_blk_exp_create() registered. */
static void vu_blk_exp_delete(BlockExport *exp)
{
    VuBlkExport *vexp = container_of(exp, VuBlkExport, export);

    blk_remove_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach,
                                    vexp);
}
|
||||
|
||||
const BlockExportDriver blk_exp_vhost_user_blk = {
|
||||
.type = BLOCK_EXPORT_TYPE_VHOST_USER_BLK,
|
||||
.instance_size = sizeof(VuBlkExport),
|
||||
.create = vu_blk_exp_create,
|
||||
.delete = vu_blk_exp_delete,
|
||||
.request_shutdown = vu_blk_exp_request_shutdown,
|
||||
};
|
|
@ -0,0 +1,19 @@
|
|||
/*
|
||||
 * Sharing QEMU block devices via vhost-user protocol
|
||||
*
|
||||
* Copyright (c) Coiby Xu <coiby.xu@gmail.com>.
|
||||
* Copyright (c) 2020 Red Hat, Inc.
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or
|
||||
* later. See the COPYING file in the top-level directory.
|
||||
*/
|
||||
|
||||
#ifndef VHOST_USER_BLK_SERVER_H
|
||||
#define VHOST_USER_BLK_SERVER_H
|
||||
|
||||
#include "block/export.h"
|
||||
|
||||
/* For block/export/export.c */
|
||||
extern const BlockExportDriver blk_exp_vhost_user_blk;
|
||||
|
||||
#endif /* VHOST_USER_BLK_SERVER_H */
|
|
@ -1698,6 +1698,7 @@ static int handle_aiocb_write_zeroes_unmap(void *opaque)
|
|||
switch (ret) {
|
||||
case -ENOTSUP:
|
||||
case -EINVAL:
|
||||
case -EBUSY:
|
||||
break;
|
||||
default:
|
||||
return ret;
|
||||
|
@ -2110,7 +2111,7 @@ static void raw_aio_attach_aio_context(BlockDriverState *bs,
|
|||
#endif
|
||||
#ifdef CONFIG_LINUX_IO_URING
|
||||
if (s->use_linux_io_uring) {
|
||||
Error *local_err;
|
||||
Error *local_err = NULL;
|
||||
if (!aio_setup_linux_io_uring(new_context, &local_err)) {
|
||||
error_reportf_err(local_err, "Unable to use linux io_uring, "
|
||||
"falling back to thread pool: ");
|
||||
|
@ -2925,7 +2926,6 @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes,
|
|||
#ifdef CONFIG_FALLOCATE
|
||||
if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) {
|
||||
BdrvTrackedRequest *req;
|
||||
uint64_t end;
|
||||
|
||||
/*
|
||||
* This is a workaround for a bug in the Linux XFS driver,
|
||||
|
@ -2949,9 +2949,9 @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes,
|
|||
assert(req->offset <= offset);
|
||||
assert(req->offset + req->bytes >= offset + bytes);
|
||||
|
||||
end = INT64_MAX & -(uint64_t)bs->bl.request_alignment;
|
||||
req->bytes = end - req->offset;
|
||||
req->overlap_bytes = req->bytes;
|
||||
req->bytes = BDRV_MAX_LENGTH - req->offset;
|
||||
|
||||
assert(bdrv_check_request(req->offset, req->bytes) == 0);
|
||||
|
||||
bdrv_mark_request_serialising(req, bs->bl.request_alignment);
|
||||
}
|
||||
|
@ -3103,7 +3103,7 @@ static int raw_check_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared,
|
|||
}
|
||||
|
||||
/* Copy locks to the new fd */
|
||||
if (s->perm_change_fd) {
|
||||
if (s->perm_change_fd && s->use_lock) {
|
||||
ret = raw_apply_lock_bytes(NULL, s->perm_change_fd, perm, ~shared,
|
||||
false, errp);
|
||||
if (ret < 0) {
|
||||
|
|
311
block/io.c
311
block/io.c
|
@ -135,10 +135,10 @@ static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
|
|||
|
||||
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
|
||||
{
|
||||
ERRP_GUARD();
|
||||
BlockDriver *drv = bs->drv;
|
||||
BdrvChild *c;
|
||||
bool have_limits;
|
||||
Error *local_err = NULL;
|
||||
|
||||
memset(&bs->bl, 0, sizeof(bs->bl));
|
||||
|
||||
|
@ -156,9 +156,8 @@ void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
|
|||
QLIST_FOREACH(c, &bs->children, next) {
|
||||
if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
|
||||
{
|
||||
bdrv_refresh_limits(c->bs, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
bdrv_refresh_limits(c->bs, errp);
|
||||
if (*errp) {
|
||||
return;
|
||||
}
|
||||
bdrv_merge_limits(&bs->bl, &c->bs->bl);
|
||||
|
@ -177,6 +176,13 @@ void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
|
|||
/* Then let the driver override it */
|
||||
if (drv->bdrv_refresh_limits) {
|
||||
drv->bdrv_refresh_limits(bs, errp);
|
||||
if (*errp) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
|
||||
error_setg(errp, "Driver requires too large request alignment");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -306,17 +312,7 @@ static void bdrv_co_drain_bh_cb(void *opaque)
|
|||
|
||||
if (bs) {
|
||||
AioContext *ctx = bdrv_get_aio_context(bs);
|
||||
AioContext *co_ctx = qemu_coroutine_get_aio_context(co);
|
||||
|
||||
/*
|
||||
* When the coroutine yielded, the lock for its home context was
|
||||
* released, so we need to re-acquire it here. If it explicitly
|
||||
* acquired a different context, the lock is still held and we don't
|
||||
* want to lock it a second time (or AIO_WAIT_WHILE() would hang).
|
||||
*/
|
||||
if (ctx == co_ctx) {
|
||||
aio_context_acquire(ctx);
|
||||
}
|
||||
aio_context_acquire(ctx);
|
||||
bdrv_dec_in_flight(bs);
|
||||
if (data->begin) {
|
||||
assert(!data->drained_end_counter);
|
||||
|
@ -328,9 +324,7 @@ static void bdrv_co_drain_bh_cb(void *opaque)
|
|||
data->ignore_bds_parents,
|
||||
data->drained_end_counter);
|
||||
}
|
||||
if (ctx == co_ctx) {
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
aio_context_release(ctx);
|
||||
} else {
|
||||
assert(data->begin);
|
||||
bdrv_drain_all_begin();
|
||||
|
@ -348,13 +342,16 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
|
|||
int *drained_end_counter)
|
||||
{
|
||||
BdrvCoDrainData data;
|
||||
Coroutine *self = qemu_coroutine_self();
|
||||
AioContext *ctx = bdrv_get_aio_context(bs);
|
||||
AioContext *co_ctx = qemu_coroutine_get_aio_context(self);
|
||||
|
||||
/* Calling bdrv_drain() from a BH ensures the current coroutine yields and
|
||||
* other coroutines run if they were queued by aio_co_enter(). */
|
||||
|
||||
assert(qemu_in_coroutine());
|
||||
data = (BdrvCoDrainData) {
|
||||
.co = qemu_coroutine_self(),
|
||||
.co = self,
|
||||
.bs = bs,
|
||||
.done = false,
|
||||
.begin = begin,
|
||||
|
@ -368,13 +365,29 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
|
|||
if (bs) {
|
||||
bdrv_inc_in_flight(bs);
|
||||
}
|
||||
replay_bh_schedule_oneshot_event(bdrv_get_aio_context(bs),
|
||||
bdrv_co_drain_bh_cb, &data);
|
||||
|
||||
/*
|
||||
* Temporarily drop the lock across yield or we would get deadlocks.
|
||||
* bdrv_co_drain_bh_cb() reaquires the lock as needed.
|
||||
*
|
||||
* When we yield below, the lock for the current context will be
|
||||
* released, so if this is actually the lock that protects bs, don't drop
|
||||
* it a second time.
|
||||
*/
|
||||
if (ctx != co_ctx) {
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data);
|
||||
|
||||
qemu_coroutine_yield();
|
||||
/* If we are resumed from some other event (such as an aio completion or a
|
||||
* timer callback), it is a bug in the caller that should be fixed. */
|
||||
assert(data.done);
|
||||
|
||||
/* Reaquire the AioContext of bs if we dropped it */
|
||||
if (ctx != co_ctx) {
|
||||
aio_context_acquire(ctx);
|
||||
}
|
||||
}
|
||||
|
||||
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
|
||||
|
@ -633,6 +646,19 @@ void bdrv_drain_all_begin(void)
|
|||
}
|
||||
}
|
||||
|
||||
/*
 * Fully unwind the quiesce counter of a node that is about to be
 * deleted (refcnt must already be zero), then wait for all pending
 * drained_end activity to finish.
 */
void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    int drained_end_counter = 0;

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    /* One bdrv_do_drained_end() per outstanding quiesce. */
    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
    }
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}
|
||||
|
||||
void bdrv_drain_all_end(void)
|
||||
{
|
||||
BlockDriverState *bs = NULL;
|
||||
|
@ -872,18 +898,31 @@ static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self
|
|||
return waited;
|
||||
}
|
||||
|
||||
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
|
||||
size_t size)
|
||||
int bdrv_check_request(int64_t offset, int64_t bytes)
|
||||
{
|
||||
if (size > BDRV_REQUEST_MAX_BYTES) {
|
||||
if (offset < 0 || bytes < 0) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (!bdrv_is_inserted(bs)) {
|
||||
return -ENOMEDIUM;
|
||||
if (bytes > BDRV_MAX_LENGTH) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (offset < 0) {
|
||||
if (offset > BDRV_MAX_LENGTH - bytes) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bdrv_check_request32(int64_t offset, int64_t bytes)
|
||||
{
|
||||
int ret = bdrv_check_request(offset, bytes);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (bytes > BDRV_REQUEST_MAX_BYTES) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
|
@ -1630,7 +1669,11 @@ int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
|
|||
|
||||
trace_bdrv_co_preadv(bs, offset, bytes, flags);
|
||||
|
||||
ret = bdrv_check_byte_request(bs, offset, bytes);
|
||||
if (!bdrv_is_inserted(bs)) {
|
||||
return -ENOMEDIUM;
|
||||
}
|
||||
|
||||
ret = bdrv_check_request32(offset, bytes);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
@ -2042,11 +2085,11 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
|
|||
|
||||
trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);
|
||||
|
||||
if (!bs->drv) {
|
||||
if (!bdrv_is_inserted(bs)) {
|
||||
return -ENOMEDIUM;
|
||||
}
|
||||
|
||||
ret = bdrv_check_byte_request(bs, offset, bytes);
|
||||
ret = bdrv_check_request32(offset, bytes);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
@ -2282,17 +2325,17 @@ static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
|
|||
|
||||
if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
|
||||
ret |= BDRV_BLOCK_ALLOCATED;
|
||||
} else if (want_zero && bs->drv->supports_backing) {
|
||||
} else if (bs->drv->supports_backing) {
|
||||
BlockDriverState *cow_bs = bdrv_cow_bs(bs);
|
||||
|
||||
if (cow_bs) {
|
||||
if (!cow_bs) {
|
||||
ret |= BDRV_BLOCK_ZERO;
|
||||
} else if (want_zero) {
|
||||
int64_t size2 = bdrv_getlength(cow_bs);
|
||||
|
||||
if (size2 >= 0 && offset >= size2) {
|
||||
ret |= BDRV_BLOCK_ZERO;
|
||||
}
|
||||
} else {
|
||||
ret |= BDRV_BLOCK_ZERO;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2343,41 +2386,101 @@ early_out:
|
|||
int coroutine_fn
|
||||
bdrv_co_common_block_status_above(BlockDriverState *bs,
|
||||
BlockDriverState *base,
|
||||
bool include_base,
|
||||
bool want_zero,
|
||||
int64_t offset,
|
||||
int64_t bytes,
|
||||
int64_t *pnum,
|
||||
int64_t *map,
|
||||
BlockDriverState **file)
|
||||
BlockDriverState **file,
|
||||
int *depth)
|
||||
{
|
||||
int ret;
|
||||
BlockDriverState *p;
|
||||
int ret = 0;
|
||||
bool first = true;
|
||||
int64_t eof = 0;
|
||||
int dummy;
|
||||
|
||||
assert(bs != base);
|
||||
for (p = bs; p != base; p = bdrv_filter_or_cow_bs(p)) {
|
||||
assert(!include_base || base); /* Can't include NULL base */
|
||||
|
||||
if (!depth) {
|
||||
depth = &dummy;
|
||||
}
|
||||
*depth = 0;
|
||||
|
||||
if (!include_base && bs == base) {
|
||||
*pnum = bytes;
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
|
||||
++*depth;
|
||||
if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (ret & BDRV_BLOCK_EOF) {
|
||||
eof = offset + *pnum;
|
||||
}
|
||||
|
||||
assert(*pnum <= bytes);
|
||||
bytes = *pnum;
|
||||
|
||||
for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
|
||||
p = bdrv_filter_or_cow_bs(p))
|
||||
{
|
||||
ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
|
||||
file);
|
||||
++*depth;
|
||||
if (ret < 0) {
|
||||
break;
|
||||
return ret;
|
||||
}
|
||||
if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
|
||||
if (*pnum == 0) {
|
||||
/*
|
||||
* Reading beyond the end of the file continues to read
|
||||
* zeroes, but we can only widen the result to the
|
||||
* unallocated length we learned from an earlier
|
||||
* iteration.
|
||||
* The top layer deferred to this layer, and because this layer is
|
||||
* short, any zeroes that we synthesize beyond EOF behave as if they
|
||||
* were allocated at this layer.
|
||||
*
|
||||
* We don't include BDRV_BLOCK_EOF into ret, as upper layer may be
|
||||
* larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
|
||||
* below.
|
||||
*/
|
||||
assert(ret & BDRV_BLOCK_EOF);
|
||||
*pnum = bytes;
|
||||
}
|
||||
if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
|
||||
if (file) {
|
||||
*file = p;
|
||||
}
|
||||
ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
|
||||
break;
|
||||
}
|
||||
/* [offset, pnum] unallocated on this layer, which could be only
|
||||
* the first part of [offset, bytes]. */
|
||||
bytes = MIN(bytes, *pnum);
|
||||
first = false;
|
||||
if (ret & BDRV_BLOCK_ALLOCATED) {
|
||||
/*
|
||||
* We've found the node and the status, we must break.
|
||||
*
|
||||
* Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be
|
||||
* larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
|
||||
* below.
|
||||
*/
|
||||
ret &= ~BDRV_BLOCK_EOF;
|
||||
break;
|
||||
}
|
||||
|
||||
if (p == base) {
|
||||
assert(include_base);
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* OK, [offset, offset + *pnum) region is unallocated on this layer,
|
||||
* let's continue the diving.
|
||||
*/
|
||||
assert(*pnum <= bytes);
|
||||
bytes = *pnum;
|
||||
}
|
||||
|
||||
if (offset + *pnum == eof) {
|
||||
ret |= BDRV_BLOCK_EOF;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -2385,8 +2488,8 @@ int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
|
|||
int64_t offset, int64_t bytes, int64_t *pnum,
|
||||
int64_t *map, BlockDriverState **file)
|
||||
{
|
||||
return bdrv_common_block_status_above(bs, base, true, offset, bytes,
|
||||
pnum, map, file);
|
||||
return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
|
||||
pnum, map, file, NULL);
|
||||
}
|
||||
|
||||
int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
|
@ -2396,14 +2499,41 @@ int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
|||
offset, bytes, pnum, map, file);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check @bs (and its backing chain) to see if the range defined
|
||||
* by @offset and @bytes is known to read as zeroes.
|
||||
* Return 1 if that is the case, 0 otherwise and -errno on error.
|
||||
* This test is meant to be fast rather than accurate so returning 0
|
||||
* does not guarantee non-zero data.
|
||||
*/
|
||||
int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
|
||||
int64_t bytes)
|
||||
{
|
||||
int ret;
|
||||
int64_t pnum = bytes;
|
||||
|
||||
if (!bytes) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
ret = bdrv_common_block_status_above(bs, NULL, false, false, offset,
|
||||
bytes, &pnum, NULL, NULL, NULL);
|
||||
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
|
||||
}
|
||||
|
||||
int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
|
||||
int64_t bytes, int64_t *pnum)
|
||||
{
|
||||
int ret;
|
||||
int64_t dummy;
|
||||
|
||||
ret = bdrv_common_block_status_above(bs, bdrv_filter_or_cow_bs(bs), false,
|
||||
offset, bytes, pnum ? pnum : &dummy,
|
||||
ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
|
||||
bytes, pnum ? pnum : &dummy, NULL,
|
||||
NULL, NULL);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
|
@ -2414,8 +2544,9 @@ int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
|
|||
/*
|
||||
* Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
|
||||
*
|
||||
* Return 1 if (a prefix of) the given range is allocated in any image
|
||||
* between BASE and TOP (BASE is only included if include_base is set).
|
||||
* Return a positive depth if (a prefix of) the given range is allocated
|
||||
* in any image between BASE and TOP (BASE is only included if include_base
|
||||
* is set). Depth 1 is TOP, 2 is the first backing layer, and so forth.
|
||||
* BASE can be NULL to check if the given offset is allocated in any
|
||||
* image of the chain. Return 0 otherwise, or negative errno on
|
||||
* failure.
|
||||
|
@ -2426,51 +2557,23 @@ int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
|
|||
* at 'offset + *pnum' may return the same allocation status (in other
|
||||
* words, the result is not necessarily the maximum possible range);
|
||||
* but 'pnum' will only be 0 when end of file is reached.
|
||||
*
|
||||
*/
|
||||
int bdrv_is_allocated_above(BlockDriverState *top,
|
||||
BlockDriverState *base,
|
||||
bool include_base, int64_t offset,
|
||||
int64_t bytes, int64_t *pnum)
|
||||
{
|
||||
BlockDriverState *intermediate;
|
||||
int ret;
|
||||
int64_t n = bytes;
|
||||
|
||||
assert(base || !include_base);
|
||||
|
||||
intermediate = top;
|
||||
while (include_base || intermediate != base) {
|
||||
int64_t pnum_inter;
|
||||
int64_t size_inter;
|
||||
|
||||
assert(intermediate);
|
||||
ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
if (ret) {
|
||||
*pnum = pnum_inter;
|
||||
return 1;
|
||||
}
|
||||
|
||||
size_inter = bdrv_getlength(intermediate);
|
||||
if (size_inter < 0) {
|
||||
return size_inter;
|
||||
}
|
||||
if (n > pnum_inter &&
|
||||
(intermediate == top || offset + pnum_inter < size_inter)) {
|
||||
n = pnum_inter;
|
||||
}
|
||||
|
||||
if (intermediate == base) {
|
||||
break;
|
||||
}
|
||||
|
||||
intermediate = bdrv_filter_or_cow_bs(intermediate);
|
||||
int depth;
|
||||
int ret = bdrv_common_block_status_above(top, base, include_base, false,
|
||||
offset, bytes, pnum, NULL, NULL,
|
||||
&depth);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
*pnum = n;
|
||||
if (ret & BDRV_BLOCK_ALLOCATED) {
|
||||
return depth;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2716,8 +2819,9 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
|
|||
return -EPERM;
|
||||
}
|
||||
|
||||
if (offset < 0 || bytes < 0 || bytes > INT64_MAX - offset) {
|
||||
return -EIO;
|
||||
ret = bdrv_check_request(offset, bytes);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Do nothing if disabled. */
|
||||
|
@ -2973,10 +3077,10 @@ static int coroutine_fn bdrv_co_copy_range_internal(
|
|||
assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
|
||||
assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
|
||||
|
||||
if (!dst || !dst->bs) {
|
||||
if (!dst || !dst->bs || !bdrv_is_inserted(dst->bs)) {
|
||||
return -ENOMEDIUM;
|
||||
}
|
||||
ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes);
|
||||
ret = bdrv_check_request32(dst_offset, bytes);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
|
@ -2984,10 +3088,10 @@ static int coroutine_fn bdrv_co_copy_range_internal(
|
|||
return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
|
||||
}
|
||||
|
||||
if (!src || !src->bs) {
|
||||
if (!src || !src->bs || !bdrv_is_inserted(src->bs)) {
|
||||
return -ENOMEDIUM;
|
||||
}
|
||||
ret = bdrv_check_byte_request(src->bs, src_offset, bytes);
|
||||
ret = bdrv_check_request32(src_offset, bytes);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
|
@ -3117,6 +3221,13 @@ int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = bdrv_check_request(offset, 0);
|
||||
if (ret < 0) {
|
||||
error_setg(errp, "Required too big image size, it must be not greater "
|
||||
"than %" PRId64, BDRV_MAX_LENGTH);
|
||||
return ret;
|
||||
}
|
||||
|
||||
old_size = bdrv_getlength(bs);
|
||||
if (old_size < 0) {
|
||||
error_setg_errno(errp, -old_size, "Failed to get old image size");
|
||||
|
|
|
@ -425,6 +425,6 @@ LuringState *luring_init(Error **errp)
|
|||
void luring_cleanup(LuringState *s)
|
||||
{
|
||||
io_uring_queue_exit(&s->ring);
|
||||
g_free(s);
|
||||
trace_luring_cleanup_state(s);
|
||||
g_free(s);
|
||||
}
|
||||
|
|
|
@ -322,25 +322,23 @@ iscsi_aio_cancel(BlockAIOCB *blockacb)
|
|||
IscsiAIOCB *acb = (IscsiAIOCB *)blockacb;
|
||||
IscsiLun *iscsilun = acb->iscsilun;
|
||||
|
||||
qemu_mutex_lock(&iscsilun->mutex);
|
||||
WITH_QEMU_LOCK_GUARD(&iscsilun->mutex) {
|
||||
|
||||
/* If it was cancelled or completed already, our work is done here */
|
||||
if (acb->cancelled || acb->status != -EINPROGRESS) {
|
||||
qemu_mutex_unlock(&iscsilun->mutex);
|
||||
return;
|
||||
/* If it was cancelled or completed already, our work is done here */
|
||||
if (acb->cancelled || acb->status != -EINPROGRESS) {
|
||||
return;
|
||||
}
|
||||
|
||||
acb->cancelled = true;
|
||||
|
||||
qemu_aio_ref(acb); /* released in iscsi_abort_task_cb() */
|
||||
|
||||
/* send a task mgmt call to the target to cancel the task on the target */
|
||||
if (iscsi_task_mgmt_abort_task_async(iscsilun->iscsi, acb->task,
|
||||
iscsi_abort_task_cb, acb) < 0) {
|
||||
qemu_aio_unref(acb); /* since iscsi_abort_task_cb() won't be called */
|
||||
}
|
||||
}
|
||||
|
||||
acb->cancelled = true;
|
||||
|
||||
qemu_aio_ref(acb); /* released in iscsi_abort_task_cb() */
|
||||
|
||||
/* send a task mgmt call to the target to cancel the task on the target */
|
||||
if (iscsi_task_mgmt_abort_task_async(iscsilun->iscsi, acb->task,
|
||||
iscsi_abort_task_cb, acb) < 0) {
|
||||
qemu_aio_unref(acb); /* since iscsi_abort_task_cb() won't be called */
|
||||
}
|
||||
|
||||
qemu_mutex_unlock(&iscsilun->mutex);
|
||||
}
|
||||
|
||||
static const AIOCBInfo iscsi_aiocb_info = {
|
||||
|
@ -375,22 +373,22 @@ static void iscsi_timed_check_events(void *opaque)
|
|||
{
|
||||
IscsiLun *iscsilun = opaque;
|
||||
|
||||
qemu_mutex_lock(&iscsilun->mutex);
|
||||
WITH_QEMU_LOCK_GUARD(&iscsilun->mutex) {
|
||||
/* check for timed out requests */
|
||||
iscsi_service(iscsilun->iscsi, 0);
|
||||
|
||||
/* check for timed out requests */
|
||||
iscsi_service(iscsilun->iscsi, 0);
|
||||
if (iscsilun->request_timed_out) {
|
||||
iscsilun->request_timed_out = false;
|
||||
iscsi_reconnect(iscsilun->iscsi);
|
||||
}
|
||||
|
||||
if (iscsilun->request_timed_out) {
|
||||
iscsilun->request_timed_out = false;
|
||||
iscsi_reconnect(iscsilun->iscsi);
|
||||
/*
|
||||
* newer versions of libiscsi may return zero events. Ensure we are
|
||||
* able to return to service once this situation changes.
|
||||
*/
|
||||
iscsi_set_events(iscsilun);
|
||||
}
|
||||
|
||||
/* newer versions of libiscsi may return zero events. Ensure we are able
|
||||
* to return to service once this situation changes. */
|
||||
iscsi_set_events(iscsilun);
|
||||
|
||||
qemu_mutex_unlock(&iscsilun->mutex);
|
||||
|
||||
timer_mod(iscsilun->event_timer,
|
||||
qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + EVENT_INTERVAL);
|
||||
}
|
||||
|
|
|
@ -7,7 +7,6 @@ block_ss.add(files(
|
|||
'backup-top.c',
|
||||
'blkdebug.c',
|
||||
'blklogwrites.c',
|
||||
'blkreplay.c',
|
||||
'blkverify.c',
|
||||
'block-backend.c',
|
||||
'block-copy.c',
|
||||
|
@ -42,6 +41,8 @@ block_ss.add(files(
|
|||
'write-threshold.c',
|
||||
), zstd, zlib)
|
||||
|
||||
softmmu_ss.add(when: 'CONFIG_TCG', if_true: files('blkreplay.c'))
|
||||
|
||||
block_ss.add(when: 'CONFIG_QCOW1', if_true: files('qcow.c'))
|
||||
block_ss.add(when: 'CONFIG_VDI', if_true: files('vdi.c'))
|
||||
block_ss.add(when: 'CONFIG_CLOOP', if_true: files('cloop.c'))
|
||||
|
|
|
@ -846,7 +846,7 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
|
|||
}
|
||||
|
||||
assert(count);
|
||||
if (ret == 1) {
|
||||
if (ret > 0) {
|
||||
bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
|
||||
}
|
||||
offset += count;
|
||||
|
|
26
block/nbd.c
26
block/nbd.c
|
@ -135,6 +135,7 @@ typedef struct BDRVNBDState {
|
|||
QCryptoTLSCreds *tlscreds;
|
||||
const char *hostname;
|
||||
char *x_dirty_bitmap;
|
||||
bool alloc_depth;
|
||||
|
||||
bool wait_connect;
|
||||
NBDConnectThread *connect_thread;
|
||||
|
@ -961,6 +962,16 @@ static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
|
|||
trace_nbd_parse_blockstatus_compliance("extent length too large");
|
||||
}
|
||||
|
||||
/*
|
||||
* HACK: if we are using x-dirty-bitmaps to access
|
||||
* qemu:allocation-depth, treat all depths > 2 the same as 2,
|
||||
* since nbd_client_co_block_status is only expecting the low two
|
||||
* bits to be set.
|
||||
*/
|
||||
if (s->alloc_depth && extent->flags > 2) {
|
||||
extent->flags = 2;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1795,11 +1806,16 @@ static int nbd_client_handshake(BlockDriverState *bs, QIOChannelSocket *sioc,
|
|||
s->sioc = NULL;
|
||||
return ret;
|
||||
}
|
||||
if (s->x_dirty_bitmap && !s->info.base_allocation) {
|
||||
error_setg(errp, "requested x-dirty-bitmap %s not found",
|
||||
s->x_dirty_bitmap);
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
if (s->x_dirty_bitmap) {
|
||||
if (!s->info.base_allocation) {
|
||||
error_setg(errp, "requested x-dirty-bitmap %s not found",
|
||||
s->x_dirty_bitmap);
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
if (strcmp(s->x_dirty_bitmap, "qemu:allocation-depth") == 0) {
|
||||
s->alloc_depth = true;
|
||||
}
|
||||
}
|
||||
if (s->info.flags & NBD_FLAG_READ_ONLY) {
|
||||
ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
|
||||
|
|
13
block/nfs.c
13
block/nfs.c
|
@ -24,7 +24,9 @@
|
|||
|
||||
#include "qemu/osdep.h"
|
||||
|
||||
#if !defined(_WIN32)
|
||||
#include <poll.h>
|
||||
#endif
|
||||
#include "qemu/config-file.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qapi/error.h"
|
||||
|
@ -58,7 +60,7 @@ typedef struct NFSClient {
|
|||
bool has_zero_init;
|
||||
AioContext *aio_context;
|
||||
QemuMutex mutex;
|
||||
blkcnt_t st_blocks;
|
||||
uint64_t st_blocks;
|
||||
bool cache_used;
|
||||
NFSServer *server;
|
||||
char *path;
|
||||
|
@ -545,7 +547,9 @@ static int64_t nfs_client_open(NFSClient *client, BlockdevOptionsNfs *opts,
|
|||
}
|
||||
|
||||
ret = DIV_ROUND_UP(st.st_size, BDRV_SECTOR_SIZE);
|
||||
#if !defined(_WIN32)
|
||||
client->st_blocks = st.st_blocks;
|
||||
#endif
|
||||
client->has_zero_init = S_ISREG(st.st_mode);
|
||||
*strp = '/';
|
||||
goto out;
|
||||
|
@ -706,6 +710,7 @@ static int nfs_has_zero_init(BlockDriverState *bs)
|
|||
return client->has_zero_init;
|
||||
}
|
||||
|
||||
#if !defined(_WIN32)
|
||||
/* Called (via nfs_service) with QemuMutex held. */
|
||||
static void
|
||||
nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data,
|
||||
|
@ -748,6 +753,7 @@ static int64_t nfs_get_allocated_file_size(BlockDriverState *bs)
|
|||
|
||||
return (task.ret < 0 ? task.ret : st.st_blocks * 512);
|
||||
}
|
||||
#endif
|
||||
|
||||
static int coroutine_fn
|
||||
nfs_file_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
|
||||
|
@ -800,7 +806,9 @@ static int nfs_reopen_prepare(BDRVReopenState *state,
|
|||
nfs_get_error(client->context));
|
||||
return ret;
|
||||
}
|
||||
#if !defined(_WIN32)
|
||||
client->st_blocks = st.st_blocks;
|
||||
#endif
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -869,7 +877,10 @@ static BlockDriver bdrv_nfs = {
|
|||
.create_opts = &nfs_create_opts,
|
||||
|
||||
.bdrv_has_zero_init = nfs_has_zero_init,
|
||||
/* libnfs does not provide the allocated filesize of a file on win32. */
|
||||
#if !defined(_WIN32)
|
||||
.bdrv_get_allocated_file_size = nfs_get_allocated_file_size,
|
||||
#endif
|
||||
.bdrv_co_truncate = nfs_file_co_truncate,
|
||||
|
||||
.bdrv_file_open = nfs_file_open,
|
||||
|
|
236
block/nvme.c
236
block/nvme.c
|
@ -41,6 +41,16 @@
|
|||
|
||||
typedef struct BDRVNVMeState BDRVNVMeState;
|
||||
|
||||
/* Same index is used for queues and IRQs */
|
||||
#define INDEX_ADMIN 0
|
||||
#define INDEX_IO(n) (1 + n)
|
||||
|
||||
/* This driver shares a single MSIX IRQ for the admin and I/O queues */
|
||||
enum {
|
||||
MSIX_SHARED_IRQ_IDX = 0,
|
||||
MSIX_IRQ_COUNT = 1
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
int32_t head, tail;
|
||||
uint8_t *queue;
|
||||
|
@ -81,18 +91,10 @@ typedef struct {
|
|||
QEMUBH *completion_bh;
|
||||
} NVMeQueuePair;
|
||||
|
||||
#define INDEX_ADMIN 0
|
||||
#define INDEX_IO(n) (1 + n)
|
||||
|
||||
/* This driver shares a single MSIX IRQ for the admin and I/O queues */
|
||||
enum {
|
||||
MSIX_SHARED_IRQ_IDX = 0,
|
||||
MSIX_IRQ_COUNT = 1
|
||||
};
|
||||
|
||||
struct BDRVNVMeState {
|
||||
AioContext *aio_context;
|
||||
QEMUVFIOState *vfio;
|
||||
void *bar0_wo_map;
|
||||
/* Memory mapped registers */
|
||||
volatile struct {
|
||||
uint32_t sq_tail;
|
||||
|
@ -103,7 +105,7 @@ struct BDRVNVMeState {
|
|||
* [1..]: io queues.
|
||||
*/
|
||||
NVMeQueuePair **queues;
|
||||
int nr_queues;
|
||||
unsigned queue_count;
|
||||
size_t page_size;
|
||||
/* How many uint32_t elements does each doorbell entry take. */
|
||||
size_t doorbell_scale;
|
||||
|
@ -128,6 +130,12 @@ struct BDRVNVMeState {
|
|||
|
||||
/* PCI address (required for nvme_refresh_filename()) */
|
||||
char *device;
|
||||
|
||||
struct {
|
||||
uint64_t completion_errors;
|
||||
uint64_t aligned_accesses;
|
||||
uint64_t unaligned_accesses;
|
||||
} stats;
|
||||
};
|
||||
|
||||
#define NVME_BLOCK_OPT_DEVICE "device"
|
||||
|
@ -153,28 +161,32 @@ static QemuOptsList runtime_opts = {
|
|||
},
|
||||
};
|
||||
|
||||
static void nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
|
||||
int nentries, int entry_bytes, Error **errp)
|
||||
/* Returns true on success, false on failure. */
|
||||
static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
|
||||
unsigned nentries, size_t entry_bytes, Error **errp)
|
||||
{
|
||||
size_t bytes;
|
||||
int r;
|
||||
|
||||
bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
|
||||
bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size);
|
||||
q->head = q->tail = 0;
|
||||
q->queue = qemu_try_memalign(s->page_size, bytes);
|
||||
q->queue = qemu_try_memalign(qemu_real_host_page_size, bytes);
|
||||
if (!q->queue) {
|
||||
error_setg(errp, "Cannot allocate queue");
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
memset(q->queue, 0, bytes);
|
||||
r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
|
||||
if (r) {
|
||||
error_setg(errp, "Cannot map queue");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static void nvme_free_queue_pair(NVMeQueuePair *q)
|
||||
{
|
||||
trace_nvme_free_queue_pair(q->index, q);
|
||||
if (q->completion_bh) {
|
||||
qemu_bh_delete(q->completion_bh);
|
||||
}
|
||||
|
@ -198,31 +210,33 @@ static void nvme_free_req_queue_cb(void *opaque)
|
|||
|
||||
static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
|
||||
AioContext *aio_context,
|
||||
int idx, int size,
|
||||
unsigned idx, size_t size,
|
||||
Error **errp)
|
||||
{
|
||||
int i, r;
|
||||
Error *local_err = NULL;
|
||||
NVMeQueuePair *q;
|
||||
uint64_t prp_list_iova;
|
||||
size_t bytes;
|
||||
|
||||
q = g_try_new0(NVMeQueuePair, 1);
|
||||
if (!q) {
|
||||
return NULL;
|
||||
}
|
||||
q->prp_list_pages = qemu_try_memalign(s->page_size,
|
||||
s->page_size * NVME_NUM_REQS);
|
||||
trace_nvme_create_queue_pair(idx, q, size, aio_context,
|
||||
event_notifier_get_fd(s->irq_notifier));
|
||||
bytes = QEMU_ALIGN_UP(s->page_size * NVME_NUM_REQS,
|
||||
qemu_real_host_page_size);
|
||||
q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size, bytes);
|
||||
if (!q->prp_list_pages) {
|
||||
goto fail;
|
||||
}
|
||||
memset(q->prp_list_pages, 0, s->page_size * NVME_NUM_REQS);
|
||||
memset(q->prp_list_pages, 0, bytes);
|
||||
qemu_mutex_init(&q->lock);
|
||||
q->s = s;
|
||||
q->index = idx;
|
||||
qemu_co_queue_init(&q->free_req_queue);
|
||||
q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
|
||||
r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
|
||||
s->page_size * NVME_NUM_REQS,
|
||||
r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes,
|
||||
false, &prp_list_iova);
|
||||
if (r) {
|
||||
goto fail;
|
||||
|
@ -237,16 +251,12 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
|
|||
req->prp_list_iova = prp_list_iova + i * s->page_size;
|
||||
}
|
||||
|
||||
nvme_init_queue(s, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
if (!nvme_init_queue(s, &q->sq, size, NVME_SQ_ENTRY_BYTES, errp)) {
|
||||
goto fail;
|
||||
}
|
||||
q->sq.doorbell = &s->doorbells[idx * s->doorbell_scale].sq_tail;
|
||||
|
||||
nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
if (!nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, errp)) {
|
||||
goto fail;
|
||||
}
|
||||
q->cq.doorbell = &s->doorbells[idx * s->doorbell_scale].cq_head;
|
||||
|
@ -286,7 +296,7 @@ static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
|
|||
|
||||
while (q->free_req_head == -1) {
|
||||
if (qemu_in_coroutine()) {
|
||||
trace_nvme_free_req_queue_wait(q);
|
||||
trace_nvme_free_req_queue_wait(q->s, q->index);
|
||||
qemu_co_queue_wait(&q->free_req_queue, &q->lock);
|
||||
} else {
|
||||
qemu_mutex_unlock(&q->lock);
|
||||
|
@ -384,14 +394,17 @@ static bool nvme_process_completion(NVMeQueuePair *q)
|
|||
break;
|
||||
}
|
||||
ret = nvme_translate_error(c);
|
||||
if (ret) {
|
||||
s->stats.completion_errors++;
|
||||
}
|
||||
q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
|
||||
if (!q->cq.head) {
|
||||
q->cq_phase = !q->cq_phase;
|
||||
}
|
||||
cid = le16_to_cpu(c->cid);
|
||||
if (cid == 0 || cid > NVME_QUEUE_SIZE) {
|
||||
fprintf(stderr, "Unexpected CID in completion queue: %" PRIu32 "\n",
|
||||
cid);
|
||||
warn_report("NVMe: Unexpected CID in completion queue: %"PRIu32", "
|
||||
"queue size: %u", cid, NVME_QUEUE_SIZE);
|
||||
continue;
|
||||
}
|
||||
trace_nvme_complete_command(s, q->index, cid);
|
||||
|
@ -456,7 +469,7 @@ static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
|
|||
assert(!req->cb);
|
||||
req->cb = cb;
|
||||
req->opaque = opaque;
|
||||
cmd->cid = cpu_to_le32(req->cid);
|
||||
cmd->cid = cpu_to_le16(req->cid);
|
||||
|
||||
trace_nvme_submit_command(q->s, q->index, req->cid);
|
||||
nvme_trace_command(cmd);
|
||||
|
@ -470,16 +483,17 @@ static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
|
|||
qemu_mutex_unlock(&q->lock);
|
||||
}
|
||||
|
||||
static void nvme_cmd_sync_cb(void *opaque, int ret)
|
||||
static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
|
||||
{
|
||||
int *pret = opaque;
|
||||
*pret = ret;
|
||||
aio_wait_kick();
|
||||
}
|
||||
|
||||
static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
|
||||
NvmeCmd *cmd)
|
||||
static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
|
||||
{
|
||||
BDRVNVMeState *s = bs->opaque;
|
||||
NVMeQueuePair *q = s->queues[INDEX_ADMIN];
|
||||
AioContext *aio_context = bdrv_get_aio_context(bs);
|
||||
NVMeRequest *req;
|
||||
int ret = -EINPROGRESS;
|
||||
|
@ -487,15 +501,17 @@ static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
|
|||
if (!req) {
|
||||
return -EBUSY;
|
||||
}
|
||||
nvme_submit_command(q, req, cmd, nvme_cmd_sync_cb, &ret);
|
||||
nvme_submit_command(q, req, cmd, nvme_admin_cmd_sync_cb, &ret);
|
||||
|
||||
AIO_WAIT_WHILE(aio_context, ret == -EINPROGRESS);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
|
||||
/* Returns true on success, false on failure. */
|
||||
static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
|
||||
{
|
||||
BDRVNVMeState *s = bs->opaque;
|
||||
bool ret = false;
|
||||
union {
|
||||
NvmeIdCtrl ctrl;
|
||||
NvmeIdNs ns;
|
||||
|
@ -508,21 +524,22 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
|
|||
.opcode = NVME_ADM_CMD_IDENTIFY,
|
||||
.cdw10 = cpu_to_le32(0x1),
|
||||
};
|
||||
size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size);
|
||||
|
||||
id = qemu_try_memalign(s->page_size, sizeof(*id));
|
||||
id = qemu_try_memalign(qemu_real_host_page_size, id_size);
|
||||
if (!id) {
|
||||
error_setg(errp, "Cannot allocate buffer for identify response");
|
||||
goto out;
|
||||
}
|
||||
r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova);
|
||||
r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova);
|
||||
if (r) {
|
||||
error_setg(errp, "Cannot map buffer for DMA");
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(id, 0, sizeof(*id));
|
||||
memset(id, 0, id_size);
|
||||
cmd.dptr.prp1 = cpu_to_le64(iova);
|
||||
if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
|
||||
if (nvme_admin_cmd_sync(bs, &cmd)) {
|
||||
error_setg(errp, "Failed to identify controller");
|
||||
goto out;
|
||||
}
|
||||
|
@ -542,10 +559,10 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
|
|||
s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES);
|
||||
s->supports_discard = !!(oncs & NVME_ONCS_DSM);
|
||||
|
||||
memset(id, 0, sizeof(*id));
|
||||
memset(id, 0, id_size);
|
||||
cmd.cdw10 = 0;
|
||||
cmd.nsid = cpu_to_le32(namespace);
|
||||
if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
|
||||
if (nvme_admin_cmd_sync(bs, &cmd)) {
|
||||
error_setg(errp, "Failed to identify namespace");
|
||||
goto out;
|
||||
}
|
||||
|
@ -572,10 +589,13 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
|
|||
goto out;
|
||||
}
|
||||
|
||||
ret = true;
|
||||
s->blkshift = lbaf->ds;
|
||||
out:
|
||||
qemu_vfio_dma_unmap(s->vfio, id);
|
||||
qemu_vfree(id);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool nvme_poll_queue(NVMeQueuePair *q)
|
||||
|
@ -585,6 +605,7 @@ static bool nvme_poll_queue(NVMeQueuePair *q)
|
|||
const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
|
||||
NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];
|
||||
|
||||
trace_nvme_poll_queue(q->s, q->index);
|
||||
/*
|
||||
* Do an early check for completions. q->lock isn't needed because
|
||||
* nvme_process_completion() only runs in the event loop thread and
|
||||
|
@ -609,7 +630,7 @@ static bool nvme_poll_queues(BDRVNVMeState *s)
|
|||
bool progress = false;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < s->nr_queues; i++) {
|
||||
for (i = 0; i < s->queue_count; i++) {
|
||||
if (nvme_poll_queue(s->queues[i])) {
|
||||
progress = true;
|
||||
}
|
||||
|
@ -630,11 +651,12 @@ static void nvme_handle_event(EventNotifier *n)
|
|||
static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
|
||||
{
|
||||
BDRVNVMeState *s = bs->opaque;
|
||||
int n = s->nr_queues;
|
||||
unsigned n = s->queue_count;
|
||||
NVMeQueuePair *q;
|
||||
NvmeCmd cmd;
|
||||
int queue_size = NVME_QUEUE_SIZE;
|
||||
unsigned queue_size = NVME_QUEUE_SIZE;
|
||||
|
||||
assert(n <= UINT16_MAX);
|
||||
q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
|
||||
n, queue_size, errp);
|
||||
if (!q) {
|
||||
|
@ -643,26 +665,26 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
|
|||
cmd = (NvmeCmd) {
|
||||
.opcode = NVME_ADM_CMD_CREATE_CQ,
|
||||
.dptr.prp1 = cpu_to_le64(q->cq.iova),
|
||||
.cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
|
||||
.cdw11 = cpu_to_le32(0x3),
|
||||
.cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
|
||||
.cdw11 = cpu_to_le32(NVME_CQ_IEN | NVME_CQ_PC),
|
||||
};
|
||||
if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
|
||||
error_setg(errp, "Failed to create CQ io queue [%d]", n);
|
||||
if (nvme_admin_cmd_sync(bs, &cmd)) {
|
||||
error_setg(errp, "Failed to create CQ io queue [%u]", n);
|
||||
goto out_error;
|
||||
}
|
||||
cmd = (NvmeCmd) {
|
||||
.opcode = NVME_ADM_CMD_CREATE_SQ,
|
||||
.dptr.prp1 = cpu_to_le64(q->sq.iova),
|
||||
.cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
|
||||
.cdw11 = cpu_to_le32(0x1 | (n << 16)),
|
||||
.cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
|
||||
.cdw11 = cpu_to_le32(NVME_SQ_PC | (n << 16)),
|
||||
};
|
||||
if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
|
||||
error_setg(errp, "Failed to create SQ io queue [%d]", n);
|
||||
if (nvme_admin_cmd_sync(bs, &cmd)) {
|
||||
error_setg(errp, "Failed to create SQ io queue [%u]", n);
|
||||
goto out_error;
|
||||
}
|
||||
s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
|
||||
s->queues[n] = q;
|
||||
s->nr_queues++;
|
||||
s->queue_count++;
|
||||
return true;
|
||||
out_error:
|
||||
nvme_free_queue_pair(q);
|
||||
|
@ -675,7 +697,6 @@ static bool nvme_poll_cb(void *opaque)
|
|||
BDRVNVMeState *s = container_of(e, BDRVNVMeState,
|
||||
irq_notifier[MSIX_SHARED_IRQ_IDX]);
|
||||
|
||||
trace_nvme_poll_cb(s);
|
||||
return nvme_poll_queues(s);
|
||||
}
|
||||
|
||||
|
@ -683,12 +704,12 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
|
|||
Error **errp)
|
||||
{
|
||||
BDRVNVMeState *s = bs->opaque;
|
||||
NVMeQueuePair *q;
|
||||
AioContext *aio_context = bdrv_get_aio_context(bs);
|
||||
int ret;
|
||||
uint64_t cap;
|
||||
uint64_t timeout_ms;
|
||||
uint64_t deadline, now;
|
||||
Error *local_err = NULL;
|
||||
volatile NvmeBar *regs = NULL;
|
||||
|
||||
qemu_co_mutex_init(&s->dma_map_lock);
|
||||
|
@ -718,15 +739,29 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
|
|||
* Initialization". */
|
||||
|
||||
cap = le64_to_cpu(regs->cap);
|
||||
trace_nvme_controller_capability_raw(cap);
|
||||
trace_nvme_controller_capability("Maximum Queue Entries Supported",
|
||||
1 + NVME_CAP_MQES(cap));
|
||||
trace_nvme_controller_capability("Contiguous Queues Required",
|
||||
NVME_CAP_CQR(cap));
|
||||
trace_nvme_controller_capability("Doorbell Stride",
|
||||
2 << (2 + NVME_CAP_DSTRD(cap)));
|
||||
trace_nvme_controller_capability("Subsystem Reset Supported",
|
||||
NVME_CAP_NSSRS(cap));
|
||||
trace_nvme_controller_capability("Memory Page Size Minimum",
|
||||
1 << (12 + NVME_CAP_MPSMIN(cap)));
|
||||
trace_nvme_controller_capability("Memory Page Size Maximum",
|
||||
1 << (12 + NVME_CAP_MPSMAX(cap)));
|
||||
if (!NVME_CAP_CSS(cap)) {
|
||||
error_setg(errp, "Device doesn't support NVMe command set");
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
s->page_size = MAX(4096, 1 << NVME_CAP_MPSMIN(cap));
|
||||
s->page_size = 1u << (12 + NVME_CAP_MPSMIN(cap));
|
||||
s->doorbell_scale = (4 << NVME_CAP_DSTRD(cap)) / sizeof(uint32_t);
|
||||
bs->bl.opt_mem_alignment = s->page_size;
|
||||
bs->bl.request_alignment = s->page_size;
|
||||
timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000);
|
||||
|
||||
/* Reset device to get a clean state. */
|
||||
|
@ -743,8 +778,10 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
|
|||
}
|
||||
}
|
||||
|
||||
s->doorbells = qemu_vfio_pci_map_bar(s->vfio, 0, sizeof(NvmeBar),
|
||||
NVME_DOORBELL_SIZE, PROT_WRITE, errp);
|
||||
s->bar0_wo_map = qemu_vfio_pci_map_bar(s->vfio, 0, 0,
|
||||
sizeof(NvmeBar) + NVME_DOORBELL_SIZE,
|
||||
PROT_WRITE, errp);
|
||||
s->doorbells = (void *)((uintptr_t)s->bar0_wo_map + sizeof(NvmeBar));
|
||||
if (!s->doorbells) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
|
@ -752,19 +789,18 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
|
|||
|
||||
/* Set up admin queue. */
|
||||
s->queues = g_new(NVMeQueuePair *, 1);
|
||||
s->queues[INDEX_ADMIN] = nvme_create_queue_pair(s, aio_context, 0,
|
||||
NVME_QUEUE_SIZE,
|
||||
errp);
|
||||
if (!s->queues[INDEX_ADMIN]) {
|
||||
q = nvme_create_queue_pair(s, aio_context, 0, NVME_QUEUE_SIZE, errp);
|
||||
if (!q) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
s->nr_queues = 1;
|
||||
QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
|
||||
regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << AQA_ACQS_SHIFT) |
|
||||
(NVME_QUEUE_SIZE << AQA_ASQS_SHIFT));
|
||||
regs->asq = cpu_to_le64(s->queues[INDEX_ADMIN]->sq.iova);
|
||||
regs->acq = cpu_to_le64(s->queues[INDEX_ADMIN]->cq.iova);
|
||||
s->queues[INDEX_ADMIN] = q;
|
||||
s->queue_count = 1;
|
||||
QEMU_BUILD_BUG_ON((NVME_QUEUE_SIZE - 1) & 0xF000);
|
||||
regs->aqa = cpu_to_le32(((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) |
|
||||
((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT));
|
||||
regs->asq = cpu_to_le64(q->sq.iova);
|
||||
regs->acq = cpu_to_le64(q->cq.iova);
|
||||
|
||||
/* After setting up all control registers we can enable device now. */
|
||||
regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
|
||||
|
@ -792,9 +828,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
|
|||
&s->irq_notifier[MSIX_SHARED_IRQ_IDX],
|
||||
false, nvme_handle_event, nvme_poll_cb);
|
||||
|
||||
nvme_identify(bs, namespace, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
if (!nvme_identify(bs, namespace, errp)) {
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
|
@ -860,7 +894,7 @@ static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
|
|||
.cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
|
||||
};
|
||||
|
||||
ret = nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd);
|
||||
ret = nvme_admin_cmd_sync(bs, &cmd);
|
||||
if (ret) {
|
||||
error_setg(errp, "Failed to configure NVMe write cache");
|
||||
}
|
||||
|
@ -869,10 +903,9 @@ static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
|
|||
|
||||
static void nvme_close(BlockDriverState *bs)
|
||||
{
|
||||
int i;
|
||||
BDRVNVMeState *s = bs->opaque;
|
||||
|
||||
for (i = 0; i < s->nr_queues; ++i) {
|
||||
for (unsigned i = 0; i < s->queue_count; ++i) {
|
||||
nvme_free_queue_pair(s->queues[i]);
|
||||
}
|
||||
g_free(s->queues);
|
||||
|
@ -880,8 +913,8 @@ static void nvme_close(BlockDriverState *bs)
|
|||
&s->irq_notifier[MSIX_SHARED_IRQ_IDX],
|
||||
false, NULL, NULL);
|
||||
event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
|
||||
qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->doorbells,
|
||||
sizeof(NvmeBar), NVME_DOORBELL_SIZE);
|
||||
qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map,
|
||||
0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE);
|
||||
qemu_vfio_close(s->vfio);
|
||||
|
||||
g_free(s->device);
|
||||
|
@ -985,11 +1018,12 @@ static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
|
|||
for (i = 0; i < qiov->niov; ++i) {
|
||||
bool retry = true;
|
||||
uint64_t iova;
|
||||
size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len,
|
||||
qemu_real_host_page_size);
|
||||
try_map:
|
||||
r = qemu_vfio_dma_map(s->vfio,
|
||||
qiov->iov[i].iov_base,
|
||||
qiov->iov[i].iov_len,
|
||||
true, &iova);
|
||||
len, true, &iova);
|
||||
if (r == -ENOMEM && retry) {
|
||||
retry = false;
|
||||
trace_nvme_dma_flush_queue_wait(s);
|
||||
|
@ -1097,7 +1131,7 @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
|
|||
};
|
||||
|
||||
trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
|
||||
assert(s->nr_queues > 1);
|
||||
assert(s->queue_count > 1);
|
||||
req = nvme_get_free_req(ioq);
|
||||
assert(req);
|
||||
|
||||
|
@ -1133,8 +1167,9 @@ static inline bool nvme_qiov_aligned(BlockDriverState *bs,
|
|||
BDRVNVMeState *s = bs->opaque;
|
||||
|
||||
for (i = 0; i < qiov->niov; ++i) {
|
||||
if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
|
||||
!QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
|
||||
if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base,
|
||||
qemu_real_host_page_size) ||
|
||||
!QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size)) {
|
||||
trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
|
||||
qiov->iov[i].iov_len, s->page_size);
|
||||
return false;
|
||||
|
@ -1150,15 +1185,17 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
|
|||
int r;
|
||||
uint8_t *buf = NULL;
|
||||
QEMUIOVector local_qiov;
|
||||
|
||||
size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size);
|
||||
assert(QEMU_IS_ALIGNED(offset, s->page_size));
|
||||
assert(QEMU_IS_ALIGNED(bytes, s->page_size));
|
||||
assert(bytes <= s->max_transfer);
|
||||
if (nvme_qiov_aligned(bs, qiov)) {
|
||||
s->stats.aligned_accesses++;
|
||||
return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
|
||||
}
|
||||
s->stats.unaligned_accesses++;
|
||||
trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
|
||||
buf = qemu_try_memalign(s->page_size, bytes);
|
||||
buf = qemu_try_memalign(qemu_real_host_page_size, len);
|
||||
|
||||
if (!buf) {
|
||||
return -ENOMEM;
|
||||
|
@ -1205,7 +1242,7 @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
|
|||
.ret = -EINPROGRESS,
|
||||
};
|
||||
|
||||
assert(s->nr_queues > 1);
|
||||
assert(s->queue_count > 1);
|
||||
req = nvme_get_free_req(ioq);
|
||||
assert(req);
|
||||
nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
|
||||
|
@ -1257,7 +1294,7 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
|
|||
cmd.cdw12 = cpu_to_le32(cdw12);
|
||||
|
||||
trace_nvme_write_zeroes(s, offset, bytes, flags);
|
||||
assert(s->nr_queues > 1);
|
||||
assert(s->queue_count > 1);
|
||||
req = nvme_get_free_req(ioq);
|
||||
assert(req);
|
||||
|
||||
|
@ -1300,7 +1337,7 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
|
|||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
assert(s->nr_queues > 1);
|
||||
assert(s->queue_count > 1);
|
||||
|
||||
buf = qemu_try_memalign(s->page_size, s->page_size);
|
||||
if (!buf) {
|
||||
|
@ -1380,7 +1417,7 @@ static void nvme_detach_aio_context(BlockDriverState *bs)
|
|||
{
|
||||
BDRVNVMeState *s = bs->opaque;
|
||||
|
||||
for (int i = 0; i < s->nr_queues; i++) {
|
||||
for (unsigned i = 0; i < s->queue_count; i++) {
|
||||
NVMeQueuePair *q = s->queues[i];
|
||||
|
||||
qemu_bh_delete(q->completion_bh);
|
||||
|
@ -1401,7 +1438,7 @@ static void nvme_attach_aio_context(BlockDriverState *bs,
|
|||
aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
|
||||
false, nvme_handle_event, nvme_poll_cb);
|
||||
|
||||
for (int i = 0; i < s->nr_queues; i++) {
|
||||
for (unsigned i = 0; i < s->queue_count; i++) {
|
||||
NVMeQueuePair *q = s->queues[i];
|
||||
|
||||
q->completion_bh =
|
||||
|
@ -1418,11 +1455,10 @@ static void nvme_aio_plug(BlockDriverState *bs)
|
|||
|
||||
static void nvme_aio_unplug(BlockDriverState *bs)
|
||||
{
|
||||
int i;
|
||||
BDRVNVMeState *s = bs->opaque;
|
||||
assert(s->plugged);
|
||||
s->plugged = false;
|
||||
for (i = INDEX_IO(0); i < s->nr_queues; i++) {
|
||||
for (unsigned i = INDEX_IO(0); i < s->queue_count; i++) {
|
||||
NVMeQueuePair *q = s->queues[i];
|
||||
qemu_mutex_lock(&q->lock);
|
||||
nvme_kick(q);
|
||||
|
@ -1452,6 +1488,21 @@ static void nvme_unregister_buf(BlockDriverState *bs, void *host)
|
|||
qemu_vfio_dma_unmap(s->vfio, host);
|
||||
}
|
||||
|
||||
static BlockStatsSpecific *nvme_get_specific_stats(BlockDriverState *bs)
|
||||
{
|
||||
BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
|
||||
BDRVNVMeState *s = bs->opaque;
|
||||
|
||||
stats->driver = BLOCKDEV_DRIVER_NVME;
|
||||
stats->u.nvme = (BlockStatsSpecificNvme) {
|
||||
.completion_errors = s->stats.completion_errors,
|
||||
.aligned_accesses = s->stats.aligned_accesses,
|
||||
.unaligned_accesses = s->stats.unaligned_accesses,
|
||||
};
|
||||
|
||||
return stats;
|
||||
}
|
||||
|
||||
static const char *const nvme_strong_runtime_opts[] = {
|
||||
NVME_BLOCK_OPT_DEVICE,
|
||||
NVME_BLOCK_OPT_NAMESPACE,
|
||||
|
@ -1485,6 +1536,7 @@ static BlockDriver bdrv_nvme = {
|
|||
.bdrv_refresh_filename = nvme_refresh_filename,
|
||||
.bdrv_refresh_limits = nvme_refresh_limits,
|
||||
.strong_runtime_opts = nvme_strong_runtime_opts,
|
||||
.bdrv_get_specific_stats = nvme_get_specific_stats,
|
||||
|
||||
.bdrv_detach_aio_context = nvme_detach_aio_context,
|
||||
.bdrv_attach_aio_context = nvme_attach_aio_context,
|
||||
|
|
|
@ -1049,6 +1049,8 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
|
|||
qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
|
||||
|
||||
assert(l2_index + m->nb_clusters <= s->l2_slice_size);
|
||||
assert(m->cow_end.offset + m->cow_end.nb_bytes <=
|
||||
m->nb_clusters << s->cluster_bits);
|
||||
for (i = 0; i < m->nb_clusters; i++) {
|
||||
uint64_t offset = cluster_offset + ((uint64_t)i << s->cluster_bits);
|
||||
/* if two concurrent writes happen to the same unallocated cluster
|
||||
|
@ -1070,8 +1072,7 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
|
|||
if (has_subclusters(s) && !m->prealloc) {
|
||||
uint64_t l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i);
|
||||
unsigned written_from = m->cow_start.offset;
|
||||
unsigned written_to = m->cow_end.offset + m->cow_end.nb_bytes ?:
|
||||
m->nb_clusters << s->cluster_bits;
|
||||
unsigned written_to = m->cow_end.offset + m->cow_end.nb_bytes;
|
||||
int first_sc, last_sc;
|
||||
/* Narrow written_from and written_to down to the current cluster */
|
||||
written_from = MAX(written_from, i << s->cluster_bits);
|
||||
|
@ -2009,14 +2010,17 @@ static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
|
|||
continue;
|
||||
}
|
||||
|
||||
/* First update L2 entries */
|
||||
qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
|
||||
if (unmap) {
|
||||
qcow2_free_any_cluster(bs, old_l2_entry, QCOW2_DISCARD_REQUEST);
|
||||
}
|
||||
set_l2_entry(s, l2_slice, l2_index + i, new_l2_entry);
|
||||
if (has_subclusters(s)) {
|
||||
set_l2_bitmap(s, l2_slice, l2_index + i, new_l2_bitmap);
|
||||
}
|
||||
|
||||
/* Then decrease the refcount */
|
||||
if (unmap) {
|
||||
qcow2_free_any_cluster(bs, old_l2_entry, QCOW2_DISCARD_REQUEST);
|
||||
}
|
||||
}
|
||||
|
||||
qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
|
||||
|
|
|
@ -269,7 +269,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
|
|||
|
||||
case QCOW2_EXT_MAGIC_FEATURE_TABLE:
|
||||
if (p_feature_table != NULL) {
|
||||
void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
|
||||
void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
|
||||
ret = bdrv_pread(bs->file, offset , feature_table, ext.len);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
|
||||
|
@ -2361,15 +2361,26 @@ static bool merge_cow(uint64_t offset, unsigned bytes,
|
|||
continue;
|
||||
}
|
||||
|
||||
/* The data (middle) region must be immediately after the
|
||||
* start region */
|
||||
/*
|
||||
* The write request should start immediately after the first
|
||||
* COW region. This does not always happen because the area
|
||||
* touched by the request can be larger than the one defined
|
||||
* by @m (a single request can span an area consisting of a
|
||||
* mix of previously unallocated and allocated clusters, that
|
||||
* is why @l2meta is a list).
|
||||
*/
|
||||
if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) {
|
||||
/* In this case the request starts before this region */
|
||||
assert(offset < l2meta_cow_start(m));
|
||||
assert(m->cow_start.nb_bytes == 0);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* The end region must be immediately after the data (middle)
|
||||
* region */
|
||||
/* The write request should end immediately before the second
|
||||
* COW region (see above for why it does not always happen) */
|
||||
if (m->offset + m->cow_end.offset != offset + bytes) {
|
||||
assert(offset + bytes > m->offset + m->cow_end.offset);
|
||||
assert(m->cow_end.nb_bytes == 0);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -2387,26 +2398,26 @@ static bool merge_cow(uint64_t offset, unsigned bytes,
|
|||
return false;
|
||||
}
|
||||
|
||||
static bool is_unallocated(BlockDriverState *bs, int64_t offset, int64_t bytes)
|
||||
{
|
||||
int64_t nr;
|
||||
return !bytes ||
|
||||
(!bdrv_is_allocated_above(bs, NULL, false, offset, bytes, &nr) &&
|
||||
nr == bytes);
|
||||
}
|
||||
|
||||
static bool is_zero_cow(BlockDriverState *bs, QCowL2Meta *m)
|
||||
/*
|
||||
* Return 1 if the COW regions read as zeroes, 0 if not, < 0 on error.
|
||||
* Note that returning 0 does not guarantee non-zero data.
|
||||
*/
|
||||
static int is_zero_cow(BlockDriverState *bs, QCowL2Meta *m)
|
||||
{
|
||||
/*
|
||||
* This check is designed for optimization shortcut so it must be
|
||||
* efficient.
|
||||
* Instead of is_zero(), use is_unallocated() as it is faster (but not
|
||||
* as accurate and can result in false negatives).
|
||||
* Instead of is_zero(), use bdrv_co_is_zero_fast() as it is
|
||||
* faster (but not as accurate and can result in false negatives).
|
||||
*/
|
||||
return is_unallocated(bs, m->offset + m->cow_start.offset,
|
||||
m->cow_start.nb_bytes) &&
|
||||
is_unallocated(bs, m->offset + m->cow_end.offset,
|
||||
m->cow_end.nb_bytes);
|
||||
int ret = bdrv_co_is_zero_fast(bs, m->offset + m->cow_start.offset,
|
||||
m->cow_start.nb_bytes);
|
||||
if (ret <= 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
return bdrv_co_is_zero_fast(bs, m->offset + m->cow_end.offset,
|
||||
m->cow_end.nb_bytes);
|
||||
}
|
||||
|
||||
static int handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta)
|
||||
|
@ -2432,7 +2443,10 @@ static int handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta)
|
|||
continue;
|
||||
}
|
||||
|
||||
if (!is_zero_cow(bs, m)) {
|
||||
ret = is_zero_cow(bs, m);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
} else if (ret == 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -3374,7 +3388,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
|
|||
size_t cluster_size;
|
||||
int version;
|
||||
int refcount_order;
|
||||
uint64_t* refcount_table;
|
||||
uint64_t *refcount_table;
|
||||
int ret;
|
||||
uint8_t compression_type = QCOW2_COMPRESSION_TYPE_ZLIB;
|
||||
|
||||
|
@ -3860,8 +3874,20 @@ static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
|
|||
if (!bytes) {
|
||||
return true;
|
||||
}
|
||||
res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL);
|
||||
return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr == bytes;
|
||||
|
||||
/*
|
||||
* bdrv_block_status_above doesn't merge different types of zeros, for
|
||||
* example, zeros which come from the region which is unallocated in
|
||||
* the whole backing chain, and zeros which come because of a short
|
||||
* backing file. So, we need a loop.
|
||||
*/
|
||||
do {
|
||||
res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL);
|
||||
offset += nr;
|
||||
bytes -= nr;
|
||||
} while (res >= 0 && (res & BDRV_BLOCK_ZERO) && nr && bytes);
|
||||
|
||||
return res >= 0 && (res & BDRV_BLOCK_ZERO) && bytes == 0;
|
||||
}
|
||||
|
||||
static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
|
||||
|
|
|
@ -343,8 +343,8 @@ typedef struct BDRVQcow2State {
|
|||
uint64_t l1_table_offset;
|
||||
uint64_t *l1_table;
|
||||
|
||||
Qcow2Cache* l2_table_cache;
|
||||
Qcow2Cache* refcount_block_cache;
|
||||
Qcow2Cache *l2_table_cache;
|
||||
Qcow2Cache *refcount_block_cache;
|
||||
QEMUTimer *cache_clean_timer;
|
||||
unsigned cache_clean_interval;
|
||||
|
||||
|
@ -394,7 +394,7 @@ typedef struct BDRVQcow2State {
|
|||
uint64_t autoclear_features;
|
||||
|
||||
size_t unknown_header_fields_size;
|
||||
void* unknown_header_fields;
|
||||
void *unknown_header_fields;
|
||||
QLIST_HEAD(, Qcow2UnknownHeaderExtension) unknown_header_ext;
|
||||
QTAILQ_HEAD (, Qcow2DiscardRegion) discards;
|
||||
bool cache_discards;
|
||||
|
@ -435,17 +435,18 @@ typedef struct Qcow2COWRegion {
|
|||
|
||||
/**
|
||||
* Describes an in-flight (part of a) write request that writes to clusters
|
||||
* that are not referenced in their L2 table yet.
|
||||
* that need to have their L2 table entries updated (because they are
|
||||
* newly allocated or need changes in their L2 bitmaps)
|
||||
*/
|
||||
typedef struct QCowL2Meta
|
||||
{
|
||||
/** Guest offset of the first newly allocated cluster */
|
||||
/** Guest offset of the first updated cluster */
|
||||
uint64_t offset;
|
||||
|
||||
/** Host offset of the first newly allocated cluster */
|
||||
/** Host offset of the first updated cluster */
|
||||
uint64_t alloc_offset;
|
||||
|
||||
/** Number of newly allocated clusters */
|
||||
/** Number of updated clusters */
|
||||
int nb_clusters;
|
||||
|
||||
/** Do not free the old clusters */
|
||||
|
@ -458,14 +459,16 @@ typedef struct QCowL2Meta
|
|||
CoQueue dependent_requests;
|
||||
|
||||
/**
|
||||
* The COW Region between the start of the first allocated cluster and the
|
||||
* area the guest actually writes to.
|
||||
* The COW Region immediately before the area the guest actually
|
||||
* writes to. This (part of the) write request starts at
|
||||
* cow_start.offset + cow_start.nb_bytes.
|
||||
*/
|
||||
Qcow2COWRegion cow_start;
|
||||
|
||||
/**
|
||||
* The COW Region between the area the guest actually writes to and the
|
||||
* end of the last allocated cluster.
|
||||
* The COW Region immediately after the area the guest actually
|
||||
* writes to. This (part of the) write request ends at cow_end.offset
|
||||
* (which must always be set even when cow_end.nb_bytes is 0).
|
||||
*/
|
||||
Qcow2COWRegion cow_end;
|
||||
|
||||
|
|
|
@ -856,7 +856,7 @@ static int quorum_valid_threshold(int threshold, int num_children, Error **errp)
|
|||
|
||||
if (threshold < 1) {
|
||||
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
|
||||
"vote-threshold", "value >= 1");
|
||||
"vote-threshold", "a value >= 1");
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
|
@ -1163,7 +1163,12 @@ static void quorum_child_perm(BlockDriverState *bs, BdrvChild *c,
|
|||
uint64_t perm, uint64_t shared,
|
||||
uint64_t *nperm, uint64_t *nshared)
|
||||
{
|
||||
BDRVQuorumState *s = bs->opaque;
|
||||
|
||||
*nperm = perm & DEFAULT_PERM_PASSTHROUGH;
|
||||
if (s->rewrite_corrupted) {
|
||||
*nperm |= BLK_PERM_WRITE;
|
||||
}
|
||||
|
||||
/*
|
||||
* We cannot share RESIZE or WRITE, as this would make the
|
||||
|
|
|
@ -242,6 +242,16 @@ typedef struct SheepdogInode {
|
|||
*/
|
||||
#define FNV1A_64_INIT ((uint64_t)0xcbf29ce484222325ULL)
|
||||
|
||||
static void deprecation_warning(void)
|
||||
{
|
||||
static bool warned;
|
||||
|
||||
if (!warned) {
|
||||
warn_report("the sheepdog block driver is deprecated");
|
||||
warned = true;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* 64 bit Fowler/Noll/Vo FNV-1a hash code
|
||||
*/
|
||||
|
@ -1548,6 +1558,8 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags,
|
|||
char *buf = NULL;
|
||||
QemuOpts *opts;
|
||||
|
||||
deprecation_warning();
|
||||
|
||||
s->bs = bs;
|
||||
s->aio_context = bdrv_get_aio_context(bs);
|
||||
|
||||
|
@ -2007,6 +2019,8 @@ static int sd_co_create(BlockdevCreateOptions *options, Error **errp)
|
|||
|
||||
assert(options->driver == BLOCKDEV_DRIVER_SHEEPDOG);
|
||||
|
||||
deprecation_warning();
|
||||
|
||||
s = g_new0(BDRVSheepdogState, 1);
|
||||
|
||||
/* Steal SocketAddress from QAPI, set NULL to prevent double free */
|
||||
|
|
|
@ -167,7 +167,7 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
|
|||
n = len - offset;
|
||||
}
|
||||
|
||||
copy = (ret == 1);
|
||||
copy = (ret > 0);
|
||||
}
|
||||
trace_stream_one_iteration(s, offset, n, ret);
|
||||
if (copy) {
|
||||
|
|
|
@ -546,7 +546,7 @@ void throttle_group_register_tgm(ThrottleGroupMember *tgm,
|
|||
tgm->aio_context = ctx;
|
||||
qatomic_set(&tgm->restart_pending, 0);
|
||||
|
||||
qemu_mutex_lock(&tg->lock);
|
||||
QEMU_LOCK_GUARD(&tg->lock);
|
||||
/* If the ThrottleGroup is new set this ThrottleGroupMember as the token */
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (!tg->tokens[i]) {
|
||||
|
@ -565,8 +565,6 @@ void throttle_group_register_tgm(ThrottleGroupMember *tgm,
|
|||
qemu_co_mutex_init(&tgm->throttled_reqs_lock);
|
||||
qemu_co_queue_init(&tgm->throttled_reqs[0]);
|
||||
qemu_co_queue_init(&tgm->throttled_reqs[1]);
|
||||
|
||||
qemu_mutex_unlock(&tg->lock);
|
||||
}
|
||||
|
||||
/* Unregister a ThrottleGroupMember from its group, removing it from the list,
|
||||
|
@ -594,25 +592,25 @@ void throttle_group_unregister_tgm(ThrottleGroupMember *tgm)
|
|||
/* Wait for throttle_group_restart_queue_entry() coroutines to finish */
|
||||
AIO_WAIT_WHILE(tgm->aio_context, qatomic_read(&tgm->restart_pending) > 0);
|
||||
|
||||
qemu_mutex_lock(&tg->lock);
|
||||
for (i = 0; i < 2; i++) {
|
||||
assert(tgm->pending_reqs[i] == 0);
|
||||
assert(qemu_co_queue_empty(&tgm->throttled_reqs[i]));
|
||||
assert(!timer_pending(tgm->throttle_timers.timers[i]));
|
||||
if (tg->tokens[i] == tgm) {
|
||||
token = throttle_group_next_tgm(tgm);
|
||||
/* Take care of the case where this is the last tgm in the group */
|
||||
if (token == tgm) {
|
||||
token = NULL;
|
||||
WITH_QEMU_LOCK_GUARD(&tg->lock) {
|
||||
for (i = 0; i < 2; i++) {
|
||||
assert(tgm->pending_reqs[i] == 0);
|
||||
assert(qemu_co_queue_empty(&tgm->throttled_reqs[i]));
|
||||
assert(!timer_pending(tgm->throttle_timers.timers[i]));
|
||||
if (tg->tokens[i] == tgm) {
|
||||
token = throttle_group_next_tgm(tgm);
|
||||
/* Take care of the case where this is the last tgm in the group */
|
||||
if (token == tgm) {
|
||||
token = NULL;
|
||||
}
|
||||
tg->tokens[i] = token;
|
||||
}
|
||||
tg->tokens[i] = token;
|
||||
}
|
||||
}
|
||||
|
||||
/* remove the current tgm from the list */
|
||||
QLIST_REMOVE(tgm, round_robin);
|
||||
throttle_timers_destroy(&tgm->throttle_timers);
|
||||
qemu_mutex_unlock(&tg->lock);
|
||||
/* remove the current tgm from the list */
|
||||
QLIST_REMOVE(tgm, round_robin);
|
||||
throttle_timers_destroy(&tgm->throttle_timers);
|
||||
}
|
||||
|
||||
throttle_group_unref(&tg->ts);
|
||||
tgm->throttle_state = NULL;
|
||||
|
@ -638,14 +636,14 @@ void throttle_group_detach_aio_context(ThrottleGroupMember *tgm)
|
|||
assert(qemu_co_queue_empty(&tgm->throttled_reqs[1]));
|
||||
|
||||
/* Kick off next ThrottleGroupMember, if necessary */
|
||||
qemu_mutex_lock(&tg->lock);
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (timer_pending(tt->timers[i])) {
|
||||
tg->any_timer_armed[i] = false;
|
||||
schedule_next_request(tgm, i);
|
||||
WITH_QEMU_LOCK_GUARD(&tg->lock) {
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (timer_pending(tt->timers[i])) {
|
||||
tg->any_timer_armed[i] = false;
|
||||
schedule_next_request(tgm, i);
|
||||
}
|
||||
}
|
||||
}
|
||||
qemu_mutex_unlock(&tg->lock);
|
||||
|
||||
throttle_timers_detach_aio_context(tt);
|
||||
tgm->aio_context = NULL;
|
||||
|
|
|
@ -134,25 +134,29 @@ qed_aio_write_postfill(void *s, void *acb, uint64_t start, size_t len, uint64_t
|
|||
qed_aio_write_main(void *s, void *acb, int ret, uint64_t offset, size_t len) "s %p acb %p ret %d offset %"PRIu64" len %zu"
|
||||
|
||||
# nvme.c
|
||||
nvme_kick(void *s, int queue) "s %p queue %d"
|
||||
nvme_controller_capability_raw(uint64_t value) "0x%08"PRIx64
|
||||
nvme_controller_capability(const char *desc, uint64_t value) "%s: %"PRIu64
|
||||
nvme_kick(void *s, unsigned q_index) "s %p q #%u"
|
||||
nvme_dma_flush_queue_wait(void *s) "s %p"
|
||||
nvme_error(int cmd_specific, int sq_head, int sqid, int cid, int status) "cmd_specific %d sq_head %d sqid %d cid %d status 0x%x"
|
||||
nvme_process_completion(void *s, int index, int inflight) "s %p queue %d inflight %d"
|
||||
nvme_process_completion_queue_plugged(void *s, int index) "s %p queue %d"
|
||||
nvme_complete_command(void *s, int index, int cid) "s %p queue %d cid %d"
|
||||
nvme_submit_command(void *s, int index, int cid) "s %p queue %d cid %d"
|
||||
nvme_process_completion(void *s, unsigned q_index, int inflight) "s %p q #%u inflight %d"
|
||||
nvme_process_completion_queue_plugged(void *s, unsigned q_index) "s %p q #%u"
|
||||
nvme_complete_command(void *s, unsigned q_index, int cid) "s %p q #%u cid %d"
|
||||
nvme_submit_command(void *s, unsigned q_index, int cid) "s %p q #%u cid %d"
|
||||
nvme_submit_command_raw(int c0, int c1, int c2, int c3, int c4, int c5, int c6, int c7) "%02x %02x %02x %02x %02x %02x %02x %02x"
|
||||
nvme_handle_event(void *s) "s %p"
|
||||
nvme_poll_cb(void *s) "s %p"
|
||||
nvme_prw_aligned(void *s, int is_write, uint64_t offset, uint64_t bytes, int flags, int niov) "s %p is_write %d offset %"PRId64" bytes %"PRId64" flags %d niov %d"
|
||||
nvme_write_zeroes(void *s, uint64_t offset, uint64_t bytes, int flags) "s %p offset %"PRId64" bytes %"PRId64" flags %d"
|
||||
nvme_poll_queue(void *s, unsigned q_index) "s %p q #%u"
|
||||
nvme_prw_aligned(void *s, int is_write, uint64_t offset, uint64_t bytes, int flags, int niov) "s %p is_write %d offset 0x%"PRIx64" bytes %"PRId64" flags %d niov %d"
|
||||
nvme_write_zeroes(void *s, uint64_t offset, uint64_t bytes, int flags) "s %p offset 0x%"PRIx64" bytes %"PRId64" flags %d"
|
||||
nvme_qiov_unaligned(const void *qiov, int n, void *base, size_t size, int align) "qiov %p n %d base %p size 0x%zx align 0x%x"
|
||||
nvme_prw_buffered(void *s, uint64_t offset, uint64_t bytes, int niov, int is_write) "s %p offset %"PRId64" bytes %"PRId64" niov %d is_write %d"
|
||||
nvme_rw_done(void *s, int is_write, uint64_t offset, uint64_t bytes, int ret) "s %p is_write %d offset %"PRId64" bytes %"PRId64" ret %d"
|
||||
nvme_dsm(void *s, uint64_t offset, uint64_t bytes) "s %p offset %"PRId64" bytes %"PRId64""
|
||||
nvme_dsm_done(void *s, uint64_t offset, uint64_t bytes, int ret) "s %p offset %"PRId64" bytes %"PRId64" ret %d"
|
||||
nvme_prw_buffered(void *s, uint64_t offset, uint64_t bytes, int niov, int is_write) "s %p offset 0x%"PRIx64" bytes %"PRId64" niov %d is_write %d"
|
||||
nvme_rw_done(void *s, int is_write, uint64_t offset, uint64_t bytes, int ret) "s %p is_write %d offset 0x%"PRIx64" bytes %"PRId64" ret %d"
|
||||
nvme_dsm(void *s, uint64_t offset, uint64_t bytes) "s %p offset 0x%"PRIx64" bytes %"PRId64""
|
||||
nvme_dsm_done(void *s, uint64_t offset, uint64_t bytes, int ret) "s %p offset 0x%"PRIx64" bytes %"PRId64" ret %d"
|
||||
nvme_dma_map_flush(void *s) "s %p"
|
||||
nvme_free_req_queue_wait(void *q) "q %p"
|
||||
nvme_free_req_queue_wait(void *s, unsigned q_index) "s %p q #%u"
|
||||
nvme_create_queue_pair(unsigned q_index, void *q, unsigned size, void *aio_context, int fd) "index %u q %p size %u aioctx %p fd %d"
|
||||
nvme_free_queue_pair(unsigned q_index, void *q) "index %u q %p"
|
||||
nvme_cmd_map_qiov(void *s, void *cmd, void *req, void *qiov, int entries) "s %p cmd %p req %p qiov %p entries %d"
|
||||
nvme_cmd_map_qiov_pages(void *s, int i, uint64_t page) "s %p page[%d] 0x%"PRIx64
|
||||
nvme_cmd_map_qiov_iov(void *s, int i, void *page, int pages) "s %p iov[%d] %p pages %d"
|
||||
|
|
|
@ -595,7 +595,7 @@ static int vmdk_open_vmfs_sparse(BlockDriverState *bs,
|
|||
int ret;
|
||||
uint32_t magic;
|
||||
VMDK3Header header;
|
||||
VmdkExtent *extent;
|
||||
VmdkExtent *extent = NULL;
|
||||
|
||||
ret = bdrv_pread(file, sizeof(magic), &header, sizeof(header));
|
||||
if (ret < 0) {
|
||||
|
@ -751,7 +751,7 @@ static int vmdk_open_se_sparse(BlockDriverState *bs,
|
|||
int ret;
|
||||
VMDKSESparseConstHeader const_header;
|
||||
VMDKSESparseVolatileHeader volatile_header;
|
||||
VmdkExtent *extent;
|
||||
VmdkExtent *extent = NULL;
|
||||
|
||||
ret = bdrv_apply_auto_read_only(bs,
|
||||
"No write support for seSparse images available", errp);
|
||||
|
@ -869,7 +869,7 @@ static int vmdk_open_vmdk4(BlockDriverState *bs,
|
|||
uint32_t magic;
|
||||
uint32_t l1_size, l1_entry_sectors;
|
||||
VMDK4Header header;
|
||||
VmdkExtent *extent;
|
||||
VmdkExtent *extent = NULL;
|
||||
BDRVVmdkState *s = bs->opaque;
|
||||
int64_t l1_backup_offset = 0;
|
||||
bool compressed;
|
||||
|
@ -1088,7 +1088,7 @@ static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
|
|||
BdrvChild *extent_file;
|
||||
BdrvChildRole extent_role;
|
||||
BDRVVmdkState *s = bs->opaque;
|
||||
VmdkExtent *extent;
|
||||
VmdkExtent *extent = NULL;
|
||||
char extent_opt_prefix[32];
|
||||
Error *local_err = NULL;
|
||||
|
||||
|
|
10
block/vpc.c
10
block/vpc.c
|
@ -172,7 +172,7 @@ static QemuOptsList vpc_runtime_opts = {
|
|||
|
||||
static QemuOptsList vpc_create_opts;
|
||||
|
||||
static uint32_t vpc_checksum(uint8_t* buf, size_t size)
|
||||
static uint32_t vpc_checksum(uint8_t *buf, size_t size)
|
||||
{
|
||||
uint32_t res = 0;
|
||||
int i;
|
||||
|
@ -528,7 +528,7 @@ static inline int64_t get_image_offset(BlockDriverState *bs, uint64_t offset,
|
|||
*
|
||||
* Returns 0 on success and < 0 on error
|
||||
*/
|
||||
static int rewrite_footer(BlockDriverState* bs)
|
||||
static int rewrite_footer(BlockDriverState *bs)
|
||||
{
|
||||
int ret;
|
||||
BDRVVPCState *s = bs->opaque;
|
||||
|
@ -548,7 +548,7 @@ static int rewrite_footer(BlockDriverState* bs)
|
|||
*
|
||||
* Returns the sectors' offset in the image file on success and < 0 on error
|
||||
*/
|
||||
static int64_t alloc_block(BlockDriverState* bs, int64_t offset)
|
||||
static int64_t alloc_block(BlockDriverState *bs, int64_t offset)
|
||||
{
|
||||
BDRVVPCState *s = bs->opaque;
|
||||
int64_t bat_offset;
|
||||
|
@ -781,8 +781,8 @@ static int coroutine_fn vpc_co_block_status(BlockDriverState *bs,
|
|||
* the hardware EIDE and ATA-2 limit of 16 heads (max disk size of 127 GB)
|
||||
* and instead allow up to 255 heads.
|
||||
*/
|
||||
static int calculate_geometry(int64_t total_sectors, uint16_t* cyls,
|
||||
uint8_t* heads, uint8_t* secs_per_cyl)
|
||||
static int calculate_geometry(int64_t total_sectors, uint16_t *cyls,
|
||||
uint8_t *heads, uint8_t *secs_per_cyl)
|
||||
{
|
||||
uint32_t cyls_times_heads;
|
||||
|
||||
|
|
|
@ -1437,7 +1437,7 @@ static void print_direntry(const direntry_t* direntry)
|
|||
for(i=0;i<11;i++)
|
||||
ADD_CHAR(direntry->name[i]);
|
||||
buffer[j] = 0;
|
||||
fprintf(stderr,"%s attributes=0x%02x begin=%d size=%d\n",
|
||||
fprintf(stderr, "%s attributes=0x%02x begin=%u size=%u\n",
|
||||
buffer,
|
||||
direntry->attributes,
|
||||
begin_of_direntry(direntry),le32_to_cpu(direntry->size));
|
||||
|
@ -1446,7 +1446,7 @@ static void print_direntry(const direntry_t* direntry)
|
|||
|
||||
static void print_mapping(const mapping_t* mapping)
|
||||
{
|
||||
fprintf(stderr, "mapping (%p): begin, end = %d, %d, dir_index = %d, "
|
||||
fprintf(stderr, "mapping (%p): begin, end = %u, %u, dir_index = %u, "
|
||||
"first_mapping_index = %d, name = %s, mode = 0x%x, " ,
|
||||
mapping, mapping->begin, mapping->end, mapping->dir_index,
|
||||
mapping->first_mapping_index, mapping->path, mapping->mode);
|
||||
|
@ -1454,7 +1454,7 @@ static void print_mapping(const mapping_t* mapping)
|
|||
if (mapping->mode & MODE_DIRECTORY)
|
||||
fprintf(stderr, "parent_mapping_index = %d, first_dir_index = %d\n", mapping->info.dir.parent_mapping_index, mapping->info.dir.first_dir_index);
|
||||
else
|
||||
fprintf(stderr, "offset = %d\n", mapping->info.file.offset);
|
||||
fprintf(stderr, "offset = %u\n", mapping->info.file.offset);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -1588,7 +1588,7 @@ typedef struct commit_t {
|
|||
static void clear_commits(BDRVVVFATState* s)
|
||||
{
|
||||
int i;
|
||||
DLOG(fprintf(stderr, "clear_commits (%d commits)\n", s->commits.next));
|
||||
DLOG(fprintf(stderr, "clear_commits (%u commits)\n", s->commits.next));
|
||||
for (i = 0; i < s->commits.next; i++) {
|
||||
commit_t* commit = array_get(&(s->commits), i);
|
||||
assert(commit->path || commit->action == ACTION_WRITEOUT);
|
||||
|
@ -2648,7 +2648,9 @@ static int handle_renames_and_mkdirs(BDRVVVFATState* s)
|
|||
fprintf(stderr, "handle_renames\n");
|
||||
for (i = 0; i < s->commits.next; i++) {
|
||||
commit_t* commit = array_get(&(s->commits), i);
|
||||
fprintf(stderr, "%d, %s (%d, %d)\n", i, commit->path ? commit->path : "(null)", commit->param.rename.cluster, commit->action);
|
||||
fprintf(stderr, "%d, %s (%u, %d)\n", i,
|
||||
commit->path ? commit->path : "(null)",
|
||||
commit->param.rename.cluster, commit->action);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -14,6 +14,8 @@
|
|||
#include "sysemu/block-backend.h"
|
||||
#include "hw/block/block.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qapi/clone-visitor.h"
|
||||
#include "qapi/qapi-visit-block-export.h"
|
||||
#include "qapi/qapi-commands-block-export.h"
|
||||
#include "block/nbd.h"
|
||||
#include "io/channel-socket.h"
|
||||
|
@ -195,7 +197,8 @@ void qmp_nbd_server_add(NbdServerAddOptions *arg, Error **errp)
|
|||
* the device name as a default here for compatibility.
|
||||
*/
|
||||
if (!arg->has_name) {
|
||||
arg->name = arg->device;
|
||||
arg->has_name = true;
|
||||
arg->name = g_strdup(arg->device);
|
||||
}
|
||||
|
||||
export_opts = g_new(BlockExportOptions, 1);
|
||||
|
@ -205,15 +208,13 @@ void qmp_nbd_server_add(NbdServerAddOptions *arg, Error **errp)
|
|||
.node_name = g_strdup(bdrv_get_node_name(bs)),
|
||||
.has_writable = arg->has_writable,
|
||||
.writable = arg->writable,
|
||||
.u.nbd = {
|
||||
.has_name = true,
|
||||
.name = g_strdup(arg->name),
|
||||
.has_description = arg->has_description,
|
||||
.description = g_strdup(arg->description),
|
||||
.has_bitmap = arg->has_bitmap,
|
||||
.bitmap = g_strdup(arg->bitmap),
|
||||
},
|
||||
};
|
||||
QAPI_CLONE_MEMBERS(BlockExportOptionsNbdBase, &export_opts->u.nbd,
|
||||
qapi_NbdServerAddOptions_base(arg));
|
||||
if (arg->has_bitmap) {
|
||||
export_opts->u.nbd.has_bitmaps = true;
|
||||
QAPI_LIST_PREPEND(export_opts->u.nbd.bitmaps, g_strdup(arg->bitmap));
|
||||
}
|
||||
|
||||
/*
|
||||
* nbd-server-add doesn't complain when a read-only device should be
|
||||
|
|
32
blockdev.c
32
blockdev.c
|
@ -1827,6 +1827,7 @@ static void drive_backup_prepare(BlkActionState *common, Error **errp)
|
|||
if (set_backing_hd) {
|
||||
bdrv_set_backing_hd(target_bs, source, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
goto unref;
|
||||
}
|
||||
}
|
||||
|
@ -2454,7 +2455,7 @@ void coroutine_fn qmp_block_resize(bool has_device, const char *device,
|
|||
int64_t size, Error **errp)
|
||||
{
|
||||
Error *local_err = NULL;
|
||||
BlockBackend *blk = NULL;
|
||||
BlockBackend *blk;
|
||||
BlockDriverState *bs;
|
||||
AioContext *old_ctx;
|
||||
|
||||
|
@ -2468,27 +2469,29 @@ void coroutine_fn qmp_block_resize(bool has_device, const char *device,
|
|||
|
||||
if (size < 0) {
|
||||
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "size", "a >0 size");
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
|
||||
error_setg(errp, QERR_DEVICE_IN_USE, device);
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
blk = blk_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL, errp);
|
||||
if (!blk) {
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
bdrv_co_lock(bs);
|
||||
bdrv_drained_begin(bs);
|
||||
bdrv_co_unlock(bs);
|
||||
|
||||
old_ctx = bdrv_co_enter(bs);
|
||||
blk_truncate(blk, size, false, PREALLOC_MODE_OFF, 0, errp);
|
||||
bdrv_co_leave(bs, old_ctx);
|
||||
bdrv_drained_end(bs);
|
||||
|
||||
out:
|
||||
bdrv_co_lock(bs);
|
||||
bdrv_drained_end(bs);
|
||||
blk_unref(blk);
|
||||
bdrv_co_unlock(bs);
|
||||
}
|
||||
|
@ -2531,7 +2534,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
|
|||
if (has_base) {
|
||||
base_bs = bdrv_find_backing_image(bs, base);
|
||||
if (base_bs == NULL) {
|
||||
error_setg(errp, QERR_BASE_NOT_FOUND, base);
|
||||
error_setg(errp, "Can't find '%s' in the backing chain", base);
|
||||
goto out;
|
||||
}
|
||||
assert(bdrv_get_aio_context(base_bs) == aio_context);
|
||||
|
@ -2703,13 +2706,16 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
|
|||
}
|
||||
} else if (has_base && base) {
|
||||
base_bs = bdrv_find_backing_image(top_bs, base);
|
||||
if (base_bs == NULL) {
|
||||
error_setg(errp, "Can't find '%s' in the backing chain", base);
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
base_bs = bdrv_find_base(top_bs);
|
||||
}
|
||||
|
||||
if (base_bs == NULL) {
|
||||
error_setg(errp, QERR_BASE_NOT_FOUND, base ? base : "NULL");
|
||||
goto out;
|
||||
if (base_bs == NULL) {
|
||||
error_setg(errp, "There is no backimg image");
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
assert(bdrv_get_aio_context(base_bs) == aio_context);
|
||||
|
@ -2988,7 +2994,7 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
|
|||
}
|
||||
if (granularity & (granularity - 1)) {
|
||||
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
|
||||
"power of 2");
|
||||
"a power of 2");
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -745,7 +745,6 @@ int main(int argc, char **argv)
|
|||
const char *gdbstub = NULL;
|
||||
char **target_environ, **wrk;
|
||||
envlist_t *envlist = NULL;
|
||||
char *trace_file = NULL;
|
||||
bsd_type = target_openbsd;
|
||||
|
||||
if (argc <= 1)
|
||||
|
@ -851,8 +850,7 @@ int main(int argc, char **argv)
|
|||
} else if (!strcmp(r, "strace")) {
|
||||
do_strace = 1;
|
||||
} else if (!strcmp(r, "trace")) {
|
||||
g_free(trace_file);
|
||||
trace_file = trace_opt_parse(optarg);
|
||||
trace_opt_parse(optarg);
|
||||
} else {
|
||||
usage();
|
||||
}
|
||||
|
@ -880,7 +878,7 @@ int main(int argc, char **argv)
|
|||
if (!trace_init_backends()) {
|
||||
exit(1);
|
||||
}
|
||||
trace_init_file(trace_file);
|
||||
trace_init_file();
|
||||
|
||||
/* Zero out regs */
|
||||
memset(regs, 0, sizeof(struct target_pt_regs));
|
||||
|
|
|
@ -33,6 +33,13 @@
|
|||
|
||||
/* MUX driver for serial I/O splitting */
|
||||
|
||||
/*
|
||||
* Set to false by suspend_mux_open. Open events are delayed until
|
||||
* resume_mux_open. Usually suspend_mux_open is called before
|
||||
* command line processing and resume_mux_open afterwards.
|
||||
*/
|
||||
static bool muxes_opened = true;
|
||||
|
||||
/* Called with chr_write_lock held. */
|
||||
static int mux_chr_write(Chardev *chr, const uint8_t *buf, int len)
|
||||
{
|
||||
|
@ -237,7 +244,7 @@ void mux_chr_send_all_event(Chardev *chr, QEMUChrEvent event)
|
|||
MuxChardev *d = MUX_CHARDEV(chr);
|
||||
int i;
|
||||
|
||||
if (!machine_init_done) {
|
||||
if (!muxes_opened) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -328,7 +335,7 @@ static void qemu_chr_open_mux(Chardev *chr,
|
|||
/* only default to opened state if we've realized the initial
|
||||
* set of muxes
|
||||
*/
|
||||
*be_opened = machine_init_done;
|
||||
*be_opened = muxes_opened;
|
||||
qemu_chr_fe_init(&d->chr, drv, errp);
|
||||
}
|
||||
|
||||
|
@ -360,19 +367,42 @@ static void qemu_chr_parse_mux(QemuOpts *opts, ChardevBackend *backend,
|
|||
* mux will receive CHR_EVENT_OPENED notifications for the BE
|
||||
* immediately.
|
||||
*/
|
||||
static int open_muxes(Chardev *chr)
|
||||
static void open_muxes(Chardev *chr)
|
||||
{
|
||||
/* send OPENED to all already-attached FEs */
|
||||
mux_chr_send_all_event(chr, CHR_EVENT_OPENED);
|
||||
|
||||
/*
|
||||
* mark mux as OPENED so any new FEs will immediately receive
|
||||
* OPENED event
|
||||
*/
|
||||
chr->be_open = 1;
|
||||
}
|
||||
|
||||
void suspend_mux_open(void)
|
||||
{
|
||||
muxes_opened = false;
|
||||
}
|
||||
|
||||
static int chardev_options_parsed_cb(Object *child, void *opaque)
|
||||
{
|
||||
Chardev *chr = (Chardev *)child;
|
||||
ChardevClass *class = CHARDEV_GET_CLASS(chr);
|
||||
|
||||
if (!chr->be_open && class->chr_options_parsed) {
|
||||
class->chr_options_parsed(chr);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void resume_mux_open(void)
|
||||
{
|
||||
muxes_opened = true;
|
||||
object_child_foreach(get_chardevs_root(),
|
||||
chardev_options_parsed_cb, NULL);
|
||||
}
|
||||
|
||||
static void char_mux_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
ChardevClass *cc = CHARDEV_CLASS(oc);
|
||||
|
@ -383,7 +413,7 @@ static void char_mux_class_init(ObjectClass *oc, void *data)
|
|||
cc->chr_accept_input = mux_chr_accept_input;
|
||||
cc->chr_add_watch = mux_chr_add_watch;
|
||||
cc->chr_be_event = mux_chr_be_event;
|
||||
cc->chr_machine_done = open_muxes;
|
||||
cc->chr_options_parsed = open_muxes;
|
||||
cc->chr_update_read_handler = mux_chr_update_read_handlers;
|
||||
}
|
||||
|
||||
|
|
|
@ -443,10 +443,24 @@ static char *qemu_chr_socket_address(SocketChardev *s, const char *prefix)
|
|||
s->is_listen ? ",server" : "");
|
||||
break;
|
||||
case SOCKET_ADDRESS_TYPE_UNIX:
|
||||
return g_strdup_printf("%sunix:%s%s", prefix,
|
||||
s->addr->u.q_unix.path,
|
||||
{
|
||||
const char *tight = "", *abstract = "";
|
||||
UnixSocketAddress *sa = &s->addr->u.q_unix;
|
||||
|
||||
#ifdef CONFIG_LINUX
|
||||
if (sa->has_abstract && sa->abstract) {
|
||||
abstract = ",abstract";
|
||||
if (sa->has_tight && sa->tight) {
|
||||
tight = ",tight";
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return g_strdup_printf("%sunix:%s%s%s%s", prefix, sa->path,
|
||||
abstract, tight,
|
||||
s->is_listen ? ",server" : "");
|
||||
break;
|
||||
}
|
||||
case SOCKET_ADDRESS_TYPE_FD:
|
||||
return g_strdup_printf("%sfd:%s%s", prefix, s->addr->u.fd.str,
|
||||
s->is_listen ? ",server" : "");
|
||||
|
@ -1386,8 +1400,10 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend,
|
|||
const char *host = qemu_opt_get(opts, "host");
|
||||
const char *port = qemu_opt_get(opts, "port");
|
||||
const char *fd = qemu_opt_get(opts, "fd");
|
||||
#ifdef CONFIG_LINUX
|
||||
bool tight = qemu_opt_get_bool(opts, "tight", true);
|
||||
bool abstract = qemu_opt_get_bool(opts, "abstract", false);
|
||||
#endif
|
||||
SocketAddressLegacy *addr;
|
||||
ChardevSocket *sock;
|
||||
|
||||
|
@ -1439,8 +1455,12 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend,
|
|||
addr->type = SOCKET_ADDRESS_LEGACY_KIND_UNIX;
|
||||
q_unix = addr->u.q_unix.data = g_new0(UnixSocketAddress, 1);
|
||||
q_unix->path = g_strdup(path);
|
||||
#ifdef CONFIG_LINUX
|
||||
q_unix->has_tight = true;
|
||||
q_unix->tight = tight;
|
||||
q_unix->has_abstract = true;
|
||||
q_unix->abstract = abstract;
|
||||
#endif
|
||||
} else if (host) {
|
||||
addr->type = SOCKET_ADDRESS_LEGACY_KIND_INET;
|
||||
addr->u.inet.data = g_new(InetSocketAddress, 1);
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue