Merge
This commit is contained in:
commit
b91762af2b
@ -244,6 +244,7 @@ build-tcg-disabled:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --disable-tcg --audio-drv-list="" --with-coroutine=ucontext
|
||||
--disable-docs --disable-sdl --disable-gtk --disable-vnc
|
||||
|| { cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
- make -j"$JOBS"
|
||||
- make check-unit
|
||||
@ -275,14 +276,10 @@ build-user-static:
|
||||
CONFIGURE_ARGS: --disable-tools --disable-system --static
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
# Because the hexagon cross-compiler takes so long to build we don't rely
|
||||
# on the CI system to build it and hence this job has an optional dependency
|
||||
# declared. The image is manually uploaded.
|
||||
build-user-hexagon:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: hexagon-cross-container
|
||||
optional: true
|
||||
variables:
|
||||
IMAGE: debian-hexagon-cross
|
||||
TARGETS: hexagon-linux-user
|
||||
@ -534,8 +531,9 @@ build-tci:
|
||||
- TARGETS="aarch64 alpha arm hppa m68k microblaze ppc64 s390x x86_64"
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-tcg-interpreter
|
||||
--target-list="$(for tg in $TARGETS; do echo -n ${tg}'-softmmu '; done)" || { cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
- ../configure --enable-tcg-interpreter --disable-docs --disable-gtk --disable-vnc
|
||||
--target-list="$(for tg in $TARGETS; do echo -n ${tg}'-softmmu '; done)"
|
||||
|| { cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
- make -j"$JOBS"
|
||||
- make tests/qtest/boot-serial-test tests/qtest/cdrom-test tests/qtest/pxe-test
|
||||
- for tg in $TARGETS ; do
|
||||
|
@ -50,7 +50,7 @@ x64-freebsd-12-build:
|
||||
NAME: freebsd-12
|
||||
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
|
||||
CIRRUS_VM_IMAGE_SELECTOR: image_family
|
||||
CIRRUS_VM_IMAGE_NAME: freebsd-12-3
|
||||
CIRRUS_VM_IMAGE_NAME: freebsd-12-4
|
||||
CIRRUS_VM_CPUS: 8
|
||||
CIRRUS_VM_RAM: 8G
|
||||
UPDATE_COMMAND: pkg update
|
||||
|
@ -34,31 +34,11 @@ armhf-debian-cross-container:
|
||||
variables:
|
||||
NAME: debian-armhf-cross
|
||||
|
||||
# We never want to build hexagon in the CI system and by default we
|
||||
# always want to refer to the master registry where it lives.
|
||||
hexagon-cross-container:
|
||||
extends: .base_job_template
|
||||
image: docker:stable
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-hexagon-cross
|
||||
GIT_DEPTH: 1
|
||||
QEMU_JOB_ONLY_FORKS: 1
|
||||
services:
|
||||
- docker:dind
|
||||
before_script:
|
||||
- export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:latest"
|
||||
- export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/qemu/$NAME:latest"
|
||||
- docker info
|
||||
- docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
|
||||
script:
|
||||
- echo "TAG:$TAG"
|
||||
- echo "COMMON_TAG:$COMMON_TAG"
|
||||
- docker pull $COMMON_TAG
|
||||
- docker tag $COMMON_TAG $TAG
|
||||
- docker push "$TAG"
|
||||
after_script:
|
||||
- docker logout
|
||||
|
||||
hppa-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
|
@ -6,8 +6,7 @@
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- PKG_CONFIG_PATH=$PKG_CONFIG_PATH
|
||||
../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
|
||||
- ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
|
||||
--disable-user --target-list-exclude="arm-softmmu cris-softmmu
|
||||
i386-softmmu microblaze-softmmu mips-softmmu mipsel-softmmu
|
||||
mips64-softmmu ppc-softmmu riscv32-softmmu sh4-softmmu
|
||||
@ -32,8 +31,7 @@
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- PKG_CONFIG_PATH=$PKG_CONFIG_PATH
|
||||
../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
|
||||
- ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
|
||||
--disable-tools --enable-${ACCEL:-kvm} $EXTRA_CONFIGURE_OPTS
|
||||
- make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
|
||||
|
||||
@ -44,8 +42,7 @@
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- PKG_CONFIG_PATH=$PKG_CONFIG_PATH
|
||||
../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
|
||||
- ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
|
||||
--disable-system --target-list-exclude="aarch64_be-linux-user
|
||||
alpha-linux-user cris-linux-user m68k-linux-user microblazeel-linux-user
|
||||
nios2-linux-user or1k-linux-user ppc-linux-user sparc-linux-user
|
||||
|
@ -112,6 +112,14 @@ cross-ppc64el-user:
|
||||
variables:
|
||||
IMAGE: debian-ppc64el-cross
|
||||
|
||||
cross-ppc64el-kvm-only:
|
||||
extends: .cross_accel_build_job
|
||||
needs:
|
||||
job: ppc64el-debian-cross-container
|
||||
variables:
|
||||
IMAGE: debian-ppc64el-cross
|
||||
EXTRA_CONFIGURE_OPTS: --disable-tcg --without-default-devices
|
||||
|
||||
# The riscv64 cross-builds currently use a 'sid' container to get
|
||||
# compilers and libraries. Until something more stable is found we
|
||||
# allow_failure so as not to block CI.
|
||||
|
@ -19,9 +19,9 @@ ubuntu-20.04-s390x-all-linux-static:
|
||||
- ../configure --enable-debug --static --disable-system --disable-glusterfs --disable-libssh
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check V=1
|
||||
- make --output-sync -j`nproc` check
|
||||
|| { cat meson-logs/testlog.txt; exit 1; } ;
|
||||
- make --output-sync -j`nproc` check-tcg V=1
|
||||
- make --output-sync -j`nproc` check-tcg
|
||||
|| { cat meson-logs/testlog.txt; exit 1; } ;
|
||||
|
||||
ubuntu-20.04-s390x-all:
|
||||
@ -40,7 +40,7 @@ ubuntu-20.04-s390x-all:
|
||||
- ../configure --disable-libssh
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check V=1
|
||||
- make --output-sync -j`nproc` check
|
||||
|| { cat meson-logs/testlog.txt; exit 1; } ;
|
||||
|
||||
ubuntu-20.04-s390x-alldbg:
|
||||
@ -63,7 +63,7 @@ ubuntu-20.04-s390x-alldbg:
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make clean
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check V=1
|
||||
- make --output-sync -j`nproc` check
|
||||
|| { cat meson-logs/testlog.txt; exit 1; } ;
|
||||
|
||||
ubuntu-20.04-s390x-clang:
|
||||
@ -85,7 +85,7 @@ ubuntu-20.04-s390x-clang:
|
||||
- ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-sanitizers
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check V=1
|
||||
- make --output-sync -j`nproc` check
|
||||
|| { cat meson-logs/testlog.txt; exit 1; } ;
|
||||
|
||||
ubuntu-20.04-s390x-tci:
|
||||
@ -127,5 +127,5 @@ ubuntu-20.04-s390x-notcg:
|
||||
- ../configure --disable-libssh --disable-tcg
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check V=1
|
||||
- make --output-sync -j`nproc` check
|
||||
|| { cat meson-logs/testlog.txt; exit 1; } ;
|
||||
|
@ -21,5 +21,5 @@ ubuntu-22.04-aarch32-all:
|
||||
- ../configure --cross-prefix=arm-linux-gnueabihf-
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc --ignore=40`
|
||||
- make --output-sync -j`nproc --ignore=40` check V=1
|
||||
- make --output-sync -j`nproc --ignore=40` check
|
||||
|| { cat meson-logs/testlog.txt; exit 1; } ;
|
||||
|
@ -19,9 +19,9 @@ ubuntu-22.04-aarch64-all-linux-static:
|
||||
- ../configure --enable-debug --static --disable-system --disable-pie
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc --ignore=40`
|
||||
- make --output-sync -j`nproc --ignore=40` check V=1
|
||||
- make --output-sync -j`nproc --ignore=40` check
|
||||
|| { cat meson-logs/testlog.txt; exit 1; } ;
|
||||
- make --output-sync -j`nproc --ignore=40` check-tcg V=1
|
||||
- make --output-sync -j`nproc --ignore=40` check-tcg
|
||||
|| { cat meson-logs/testlog.txt; exit 1; } ;
|
||||
|
||||
ubuntu-22.04-aarch64-all:
|
||||
@ -43,7 +43,7 @@ ubuntu-22.04-aarch64-all:
|
||||
- ../configure
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc --ignore=40`
|
||||
- make --output-sync -j`nproc --ignore=40` check V=1
|
||||
- make --output-sync -j`nproc --ignore=40` check
|
||||
|| { cat meson-logs/testlog.txt; exit 1; } ;
|
||||
|
||||
ubuntu-22.04-aarch64-alldbg:
|
||||
@ -62,7 +62,7 @@ ubuntu-22.04-aarch64-alldbg:
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make clean
|
||||
- make --output-sync -j`nproc --ignore=40`
|
||||
- make --output-sync -j`nproc --ignore=40` check V=1
|
||||
- make --output-sync -j`nproc --ignore=40` check
|
||||
|| { cat meson-logs/testlog.txt; exit 1; } ;
|
||||
|
||||
ubuntu-22.04-aarch64-clang:
|
||||
@ -84,7 +84,7 @@ ubuntu-22.04-aarch64-clang:
|
||||
- ../configure --disable-libssh --cc=clang-10 --cxx=clang++-10 --enable-sanitizers
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc --ignore=40`
|
||||
- make --output-sync -j`nproc --ignore=40` check V=1
|
||||
- make --output-sync -j`nproc --ignore=40` check
|
||||
|| { cat meson-logs/testlog.txt; exit 1; } ;
|
||||
|
||||
ubuntu-22.04-aarch64-tci:
|
||||
@ -126,5 +126,5 @@ ubuntu-22.04-aarch64-notcg:
|
||||
- ../configure --disable-tcg
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc --ignore=40`
|
||||
- make --output-sync -j`nproc --ignore=40` check V=1
|
||||
- make --output-sync -j`nproc --ignore=40` check
|
||||
|| { cat meson-logs/testlog.txt; exit 1; } ;
|
||||
|
@ -10,7 +10,7 @@
|
||||
- ${CI_PROJECT_DIR}/msys64/var/cache
|
||||
needs: []
|
||||
stage: build
|
||||
timeout: 70m
|
||||
timeout: 80m
|
||||
before_script:
|
||||
- If ( !(Test-Path -Path msys64\var\cache ) ) {
|
||||
mkdir msys64\var\cache
|
||||
@ -41,11 +41,15 @@ msys2-64bit:
|
||||
mingw-w64-x86_64-gcc
|
||||
mingw-w64-x86_64-glib2
|
||||
mingw-w64-x86_64-gnutls
|
||||
mingw-w64-x86_64-gtk3
|
||||
mingw-w64-x86_64-libgcrypt
|
||||
mingw-w64-x86_64-libjpeg-turbo
|
||||
mingw-w64-x86_64-libnfs
|
||||
mingw-w64-x86_64-libpng
|
||||
mingw-w64-x86_64-libssh
|
||||
mingw-w64-x86_64-libtasn1
|
||||
mingw-w64-x86_64-libusb
|
||||
mingw-w64-x86_64-lzo2
|
||||
mingw-w64-x86_64-nettle
|
||||
mingw-w64-x86_64-ninja
|
||||
mingw-w64-x86_64-pixman
|
||||
@ -57,12 +61,21 @@ msys2-64bit:
|
||||
mingw-w64-x86_64-usbredir
|
||||
mingw-w64-x86_64-zstd "
|
||||
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
|
||||
- $env:MSYSTEM = 'MINGW64' # Start a 64 bit Mingw environment
|
||||
- $env:MSYSTEM = 'MINGW64' # Start a 64-bit MinGW environment
|
||||
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
|
||||
- .\msys64\usr\bin\bash -lc './configure --target-list=x86_64-softmmu
|
||||
--enable-capstone --without-default-devices'
|
||||
- .\msys64\usr\bin\bash -lc 'make'
|
||||
- .\msys64\usr\bin\bash -lc 'make check || { cat build/meson-logs/testlog.txt; exit 1; } ;'
|
||||
- mkdir output
|
||||
- cd output
|
||||
# Note: do not remove "--without-default-devices"!
|
||||
# commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using --without-default-devices"
|
||||
# changed to compile QEMU with the --without-default-devices switch
|
||||
# for the msys2 64-bit job, due to the build could not complete within
|
||||
# the project timeout.
|
||||
- ..\msys64\usr\bin\bash -lc '../configure --target-list=x86_64-softmmu
|
||||
--without-default-devices --disable-opengl'
|
||||
- ..\msys64\usr\bin\bash -lc 'make'
|
||||
# qTests don't run successfully with "--without-default-devices",
|
||||
# so let's exclude the qtests from CI for now.
|
||||
- ..\msys64\usr\bin\bash -lc 'make check MTESTARGS=\"--no-suite qtest\" || { cat meson-logs/testlog.txt; exit 1; } ;'
|
||||
|
||||
msys2-32bit:
|
||||
extends: .shared_msys2_builder
|
||||
@ -79,21 +92,29 @@ msys2-32bit:
|
||||
mingw-w64-i686-gtk3
|
||||
mingw-w64-i686-libgcrypt
|
||||
mingw-w64-i686-libjpeg-turbo
|
||||
mingw-w64-i686-libnfs
|
||||
mingw-w64-i686-libpng
|
||||
mingw-w64-i686-libssh
|
||||
mingw-w64-i686-libtasn1
|
||||
mingw-w64-i686-libusb
|
||||
mingw-w64-i686-lzo2
|
||||
mingw-w64-i686-nettle
|
||||
mingw-w64-i686-ninja
|
||||
mingw-w64-i686-pixman
|
||||
mingw-w64-i686-pkgconf
|
||||
mingw-w64-i686-python
|
||||
mingw-w64-i686-SDL2
|
||||
mingw-w64-i686-SDL2_image
|
||||
mingw-w64-i686-snappy
|
||||
mingw-w64-i686-usbredir "
|
||||
mingw-w64-i686-usbredir
|
||||
mingw-w64-i686-zstd "
|
||||
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
|
||||
- $env:MSYSTEM = 'MINGW32' # Start a 32-bit MinG environment
|
||||
- $env:MSYSTEM = 'MINGW32' # Start a 32-bit MinGW environment
|
||||
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
|
||||
- mkdir output
|
||||
- cd output
|
||||
- ..\msys64\usr\bin\bash -lc "../configure --target-list=ppc64-softmmu"
|
||||
- ..\msys64\usr\bin\bash -lc '../configure --target-list=ppc64-softmmu
|
||||
--disable-opengl'
|
||||
- ..\msys64\usr\bin\bash -lc 'make'
|
||||
- ..\msys64\usr\bin\bash -lc 'make check || { cat meson-logs/testlog.txt; exit 1; } ;'
|
||||
- ..\msys64\usr\bin\bash -lc 'make check MTESTARGS=\"--no-suite qtest\" ||
|
||||
{ cat meson-logs/testlog.txt; exit 1; }'
|
||||
|
@ -18,11 +18,11 @@ https://www.qemu.org/contribute/security-process/
|
||||
-->
|
||||
|
||||
## Host environment
|
||||
- Operating system: (Windows 10 21H1, Fedora 34, etc.)
|
||||
- OS/kernel version: (For POSIX hosts, use `uname -a`)
|
||||
- Architecture: (x86, ARM, s390x, etc.)
|
||||
- QEMU flavor: (qemu-system-x86_64, qemu-aarch64, qemu-img, etc.)
|
||||
- QEMU version: (e.g. `qemu-system-x86_64 --version`)
|
||||
- Operating system: <!-- Windows 10 21H1, Fedora 37, etc. -->
|
||||
- OS/kernel version: <!-- For POSIX hosts, use `uname -a` -->
|
||||
- Architecture: <!-- x86, ARM, s390x, etc. -->
|
||||
- QEMU flavor: <!-- qemu-system-x86_64, qemu-aarch64, qemu-img, etc. -->
|
||||
- QEMU version: <!-- e.g. `qemu-system-x86_64 --version` -->
|
||||
- QEMU command line:
|
||||
<!--
|
||||
Give the smallest, complete command line that exhibits the problem.
|
||||
@ -35,9 +35,9 @@ https://www.qemu.org/contribute/security-process/
|
||||
```
|
||||
|
||||
## Emulated/Virtualized environment
|
||||
- Operating system: (Windows 10 21H1, Fedora 34, etc.)
|
||||
- OS/kernel version: (For POSIX guests, use `uname -a`.)
|
||||
- Architecture: (x86, ARM, s390x, etc.)
|
||||
- Operating system: <!-- Windows 10 21H1, Fedora 37, etc. -->
|
||||
- OS/kernel version: <!-- For POSIX guests, use `uname -a`. -->
|
||||
- Architecture: <!-- x86, ARM, s390x, etc. -->
|
||||
|
||||
|
||||
## Description of problem
|
||||
|
1
.mailmap
1
.mailmap
@ -45,6 +45,7 @@ Ed Swierk <eswierk@skyportsystems.com> Ed Swierk via Qemu-devel <qemu-devel@nong
|
||||
Ian McKellar <ianloic@google.com> Ian McKellar via Qemu-devel <qemu-devel@nongnu.org>
|
||||
Julia Suvorova <jusual@mail.ru> Julia Suvorova via Qemu-devel <qemu-devel@nongnu.org>
|
||||
Justin Terry (VM) <juterry@microsoft.com> Justin Terry (VM) via Qemu-devel <qemu-devel@nongnu.org>
|
||||
Stefan Weil <sw@weilnetz.de> Stefan Weil via <qemu-devel@nongnu.org>
|
||||
|
||||
# Next, replace old addresses by a more recent one.
|
||||
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <aleksandar.markovic@mips.com>
|
||||
|
51
MAINTAINERS
51
MAINTAINERS
@ -78,6 +78,7 @@ M: Laurent Vivier <laurent@vivier.eu>
|
||||
S: Maintained
|
||||
L: qemu-trivial@nongnu.org
|
||||
K: ^Subject:.*(?i)trivial
|
||||
F: docs/devel/trivial-patches.rst
|
||||
T: git git://git.corpit.ru/qemu.git trivial-patches
|
||||
T: git https://github.com/vivier/qemu.git trivial-patches
|
||||
|
||||
@ -112,6 +113,8 @@ M: Philippe Mathieu-Daudé <philmd@linaro.org>
|
||||
R: Jiaxun Yang <jiaxun.yang@flygoat.com>
|
||||
S: Odd Fixes
|
||||
K: ^Subject:.*(?i)mips
|
||||
F: docs/system/target-mips.rst
|
||||
F: configs/targets/mips*
|
||||
|
||||
Guest CPU cores (TCG)
|
||||
---------------------
|
||||
@ -129,6 +132,7 @@ F: util/cacheinfo.c
|
||||
F: util/cacheflush.c
|
||||
F: scripts/decodetree.py
|
||||
F: docs/devel/decodetree.rst
|
||||
F: docs/devel/tcg*
|
||||
F: include/exec/cpu*.h
|
||||
F: include/exec/exec-all.h
|
||||
F: include/exec/helper*.h
|
||||
@ -195,12 +199,20 @@ Hexagon TCG CPUs
|
||||
M: Taylor Simpson <tsimpson@quicinc.com>
|
||||
S: Supported
|
||||
F: target/hexagon/
|
||||
X: target/hexagon/idef-parser/
|
||||
X: target/hexagon/gen_idef_parser_funcs.py
|
||||
F: linux-user/hexagon/
|
||||
F: tests/tcg/hexagon/
|
||||
F: disas/hexagon.c
|
||||
F: configs/targets/hexagon-linux-user/default.mak
|
||||
F: docker/dockerfiles/debian-hexagon-cross.docker
|
||||
F: docker/dockerfiles/debian-hexagon-cross.docker.d/build-toolchain.sh
|
||||
|
||||
Hexagon idef-parser
|
||||
M: Alessandro Di Federico <ale@rev.ng>
|
||||
M: Anton Johansson <anjo@rev.ng>
|
||||
S: Supported
|
||||
F: target/hexagon/idef-parser/
|
||||
F: target/hexagon/gen_idef_parser_funcs.py
|
||||
|
||||
HPPA (PA-RISC) TCG CPUs
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
@ -254,6 +266,7 @@ F: tests/docker/dockerfiles/debian-nios2-cross.d/build-toolchain.sh
|
||||
OpenRISC TCG CPUs
|
||||
M: Stafford Horne <shorne@gmail.com>
|
||||
S: Odd Fixes
|
||||
F: docs/system/openrisc/cpu-features.rst
|
||||
F: target/openrisc/
|
||||
F: hw/openrisc/
|
||||
F: tests/tcg/openrisc/
|
||||
@ -264,7 +277,7 @@ R: Cédric Le Goater <clg@kaod.org>
|
||||
R: David Gibson <david@gibson.dropbear.id.au>
|
||||
R: Greg Kurz <groug@kaod.org>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: target/ppc/
|
||||
F: hw/ppc/ppc.c
|
||||
F: hw/ppc/ppc_booke.c
|
||||
@ -332,6 +345,7 @@ F: target/i386/tcg/
|
||||
F: tests/tcg/i386/
|
||||
F: tests/tcg/x86_64/
|
||||
F: hw/i386/
|
||||
F: docs/system/i386/cpu.rst
|
||||
F: docs/system/cpu-models-x86*
|
||||
T: git https://gitlab.com/ehabkost/qemu.git x86-next
|
||||
|
||||
@ -389,7 +403,7 @@ M: Daniel Henrique Barboza <danielhb413@gmail.com>
|
||||
R: Cédric Le Goater <clg@kaod.org>
|
||||
R: David Gibson <david@gibson.dropbear.id.au>
|
||||
R: Greg Kurz <groug@kaod.org>
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: target/ppc/kvm.c
|
||||
|
||||
S390 KVM CPUs
|
||||
@ -563,12 +577,14 @@ ARM Machines
|
||||
Allwinner-a10
|
||||
M: Beniamino Galvani <b.galvani@gmail.com>
|
||||
M: Peter Maydell <peter.maydell@linaro.org>
|
||||
R: Strahinja Jankovic <strahinja.p.jankovic@gmail.com>
|
||||
L: qemu-arm@nongnu.org
|
||||
S: Odd Fixes
|
||||
F: hw/*/allwinner*
|
||||
F: include/hw/*/allwinner*
|
||||
F: hw/arm/cubieboard.c
|
||||
F: docs/system/arm/cubieboard.rst
|
||||
F: hw/misc/axp209.c
|
||||
|
||||
Allwinner-h3
|
||||
M: Niek Linnenbank <nieklinnenbank@gmail.com>
|
||||
@ -873,6 +889,7 @@ M: Peter Maydell <peter.maydell@linaro.org>
|
||||
R: Jean-Christophe Dubois <jcd@tribudubois.net>
|
||||
L: qemu-arm@nongnu.org
|
||||
S: Odd Fixes
|
||||
F: docs/system/arm/sabrelite.rst
|
||||
F: hw/arm/sabrelite.c
|
||||
F: hw/arm/fsl-imx6.c
|
||||
F: hw/misc/imx6_*.c
|
||||
@ -1021,6 +1038,12 @@ L: qemu-arm@nongnu.org
|
||||
S: Maintained
|
||||
F: hw/arm/netduinoplus2.c
|
||||
|
||||
Olimex STM32 H405
|
||||
M: Felipe Balbi <balbi@kernel.org>
|
||||
L: qemu-arm@nongnu.org
|
||||
S: Maintained
|
||||
F: hw/arm/olimex-stm32-h405.c
|
||||
|
||||
SmartFusion2
|
||||
M: Subbaraya Sundeep <sundeep.lkml@gmail.com>
|
||||
M: Peter Maydell <peter.maydell@linaro.org>
|
||||
@ -1273,6 +1296,7 @@ OpenRISC Machines
|
||||
or1k-sim
|
||||
M: Jia Liu <proljc@gmail.com>
|
||||
S: Maintained
|
||||
F: docs/system/openrisc/or1k-sim.rst
|
||||
F: hw/openrisc/openrisc_sim.c
|
||||
|
||||
PowerPC Machines
|
||||
@ -1367,7 +1391,7 @@ R: Cédric Le Goater <clg@kaod.org>
|
||||
R: David Gibson <david@gibson.dropbear.id.au>
|
||||
R: Greg Kurz <groug@kaod.org>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: hw/*/spapr*
|
||||
F: include/hw/*/spapr*
|
||||
F: hw/*/xics*
|
||||
@ -1644,8 +1668,8 @@ F: hw/isa/piix3.c
|
||||
F: hw/isa/lpc_ich9.c
|
||||
F: hw/i2c/smbus_ich9.c
|
||||
F: hw/acpi/piix4.c
|
||||
F: hw/acpi/ich9.c
|
||||
F: include/hw/acpi/ich9.h
|
||||
F: hw/acpi/ich9*.c
|
||||
F: include/hw/acpi/ich9*.h
|
||||
F: include/hw/southbridge/piix.h
|
||||
F: hw/misc/sga.c
|
||||
F: hw/isa/apm.c
|
||||
@ -2016,6 +2040,7 @@ F: hw/virtio/trace-events
|
||||
F: qapi/virtio.json
|
||||
F: net/vhost-user.c
|
||||
F: include/hw/virtio/
|
||||
F: docs/devel/virtio*
|
||||
|
||||
virtio-balloon
|
||||
M: Michael S. Tsirkin <mst@redhat.com>
|
||||
@ -2037,6 +2062,7 @@ X: hw/9pfs/xen-9p*
|
||||
F: fsdev/
|
||||
F: docs/tools/virtfs-proxy-helper.rst
|
||||
F: tests/qtest/virtio-9p-test.c
|
||||
F: tests/qtest/libqos/virtio-9p*
|
||||
T: git https://gitlab.com/gkurz/qemu.git 9p-next
|
||||
T: git https://github.com/cschoenebeck/qemu.git 9p.next
|
||||
|
||||
@ -2108,7 +2134,7 @@ F: tests/qtest/virtio-rng-test.c
|
||||
vhost-user-rng
|
||||
M: Mathieu Poirier <mathieu.poirier@linaro.org>
|
||||
S: Supported
|
||||
F: docs/tools/vhost-user-rng.rst
|
||||
F: docs/system/devices/vhost-user-rng.rst
|
||||
F: hw/virtio/vhost-user-rng.c
|
||||
F: hw/virtio/vhost-user-rng-pci.c
|
||||
F: include/hw/virtio/vhost-user-rng.h
|
||||
@ -2146,7 +2172,7 @@ S: Supported
|
||||
F: hw/nvme/*
|
||||
F: include/block/nvme.h
|
||||
F: tests/qtest/nvme-test.c
|
||||
F: docs/system/nvme.rst
|
||||
F: docs/system/devices/nvme.rst
|
||||
T: git git://git.infradead.org/qemu-nvme.git nvme-next
|
||||
|
||||
megasas
|
||||
@ -2696,6 +2722,7 @@ GDB stub
|
||||
M: Alex Bennée <alex.bennee@linaro.org>
|
||||
R: Philippe Mathieu-Daudé <philmd@linaro.org>
|
||||
S: Maintained
|
||||
F: docs/system/gdb.rst
|
||||
F: gdbstub/*
|
||||
F: include/exec/gdbstub.h
|
||||
F: gdb-xml/
|
||||
@ -2753,6 +2780,7 @@ F: ui/
|
||||
F: include/ui/
|
||||
F: qapi/ui.json
|
||||
F: util/drm.c
|
||||
F: docs/devel/ui.rst
|
||||
|
||||
Cocoa graphics
|
||||
M: Peter Maydell <peter.maydell@linaro.org>
|
||||
@ -2930,6 +2958,7 @@ M: Paolo Bonzini <pbonzini@redhat.com>
|
||||
R: Daniel P. Berrange <berrange@redhat.com>
|
||||
R: Eduardo Habkost <eduardo@habkost.net>
|
||||
S: Supported
|
||||
F: docs/devel/qom.rst
|
||||
F: docs/qdev-device-use.txt
|
||||
F: hw/core/qdev*
|
||||
F: hw/core/bus.c
|
||||
@ -2978,6 +3007,7 @@ F: softmmu/qtest.c
|
||||
F: accel/qtest/
|
||||
F: tests/qtest/
|
||||
F: docs/devel/qgraph.rst
|
||||
F: docs/devel/qtest.rst
|
||||
X: tests/qtest/bios-tables-test*
|
||||
|
||||
Device Fuzzing
|
||||
@ -3044,6 +3074,7 @@ F: include/sysemu/tpm*
|
||||
F: qapi/tpm.json
|
||||
F: backends/tpm/
|
||||
F: tests/qtest/*tpm*
|
||||
F: docs/specs/tpm.rst
|
||||
T: git https://github.com/stefanberger/qemu-tpm.git tpm-next
|
||||
|
||||
Checkpatch
|
||||
@ -3195,7 +3226,8 @@ F: replay/*
|
||||
F: block/blkreplay.c
|
||||
F: net/filter-replay.c
|
||||
F: include/sysemu/replay.h
|
||||
F: docs/replay.txt
|
||||
F: docs/devel/replay.rst
|
||||
F: docs/system/replay.rst
|
||||
F: stubs/replay.c
|
||||
F: tests/avocado/replay_kernel.py
|
||||
F: tests/avocado/replay_linux.py
|
||||
@ -3719,6 +3751,7 @@ F: tests/docker/
|
||||
F: tests/vm/
|
||||
F: tests/lcitool/
|
||||
F: scripts/archive-source.sh
|
||||
F: docs/devel/testing.rst
|
||||
W: https://gitlab.com/qemu-project/qemu/pipelines
|
||||
W: https://travis-ci.org/qemu/qemu
|
||||
|
||||
|
154
accel/accel-blocker.c
Normal file
154
accel/accel-blocker.c
Normal file
@ -0,0 +1,154 @@
|
||||
/*
|
||||
* Lock to inhibit accelerator ioctls
|
||||
*
|
||||
* Copyright (c) 2022 Red Hat Inc.
|
||||
*
|
||||
* Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/thread.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "hw/core/cpu.h"
|
||||
#include "sysemu/accel-blocker.h"
|
||||
|
||||
static QemuLockCnt accel_in_ioctl_lock;
|
||||
static QemuEvent accel_in_ioctl_event;
|
||||
|
||||
void accel_blocker_init(void)
|
||||
{
|
||||
qemu_lockcnt_init(&accel_in_ioctl_lock);
|
||||
qemu_event_init(&accel_in_ioctl_event, false);
|
||||
}
|
||||
|
||||
void accel_ioctl_begin(void)
|
||||
{
|
||||
if (likely(qemu_mutex_iothread_locked())) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* block if lock is taken in kvm_ioctl_inhibit_begin() */
|
||||
qemu_lockcnt_inc(&accel_in_ioctl_lock);
|
||||
}
|
||||
|
||||
void accel_ioctl_end(void)
|
||||
{
|
||||
if (likely(qemu_mutex_iothread_locked())) {
|
||||
return;
|
||||
}
|
||||
|
||||
qemu_lockcnt_dec(&accel_in_ioctl_lock);
|
||||
/* change event to SET. If event was BUSY, wake up all waiters */
|
||||
qemu_event_set(&accel_in_ioctl_event);
|
||||
}
|
||||
|
||||
void accel_cpu_ioctl_begin(CPUState *cpu)
|
||||
{
|
||||
if (unlikely(qemu_mutex_iothread_locked())) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* block if lock is taken in kvm_ioctl_inhibit_begin() */
|
||||
qemu_lockcnt_inc(&cpu->in_ioctl_lock);
|
||||
}
|
||||
|
||||
void accel_cpu_ioctl_end(CPUState *cpu)
|
||||
{
|
||||
if (unlikely(qemu_mutex_iothread_locked())) {
|
||||
return;
|
||||
}
|
||||
|
||||
qemu_lockcnt_dec(&cpu->in_ioctl_lock);
|
||||
/* change event to SET. If event was BUSY, wake up all waiters */
|
||||
qemu_event_set(&accel_in_ioctl_event);
|
||||
}
|
||||
|
||||
static bool accel_has_to_wait(void)
|
||||
{
|
||||
CPUState *cpu;
|
||||
bool needs_to_wait = false;
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
if (qemu_lockcnt_count(&cpu->in_ioctl_lock)) {
|
||||
/* exit the ioctl, if vcpu is running it */
|
||||
qemu_cpu_kick(cpu);
|
||||
needs_to_wait = true;
|
||||
}
|
||||
}
|
||||
|
||||
return needs_to_wait || qemu_lockcnt_count(&accel_in_ioctl_lock);
|
||||
}
|
||||
|
||||
void accel_ioctl_inhibit_begin(void)
|
||||
{
|
||||
CPUState *cpu;
|
||||
|
||||
/*
|
||||
* We allow to inhibit only when holding the BQL, so we can identify
|
||||
* when an inhibitor wants to issue an ioctl easily.
|
||||
*/
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
|
||||
/* Block further invocations of the ioctls outside the BQL. */
|
||||
CPU_FOREACH(cpu) {
|
||||
qemu_lockcnt_lock(&cpu->in_ioctl_lock);
|
||||
}
|
||||
qemu_lockcnt_lock(&accel_in_ioctl_lock);
|
||||
|
||||
/* Keep waiting until there are running ioctls */
|
||||
while (true) {
|
||||
|
||||
/* Reset event to FREE. */
|
||||
qemu_event_reset(&accel_in_ioctl_event);
|
||||
|
||||
if (accel_has_to_wait()) {
|
||||
/*
|
||||
* If event is still FREE, and there are ioctls still in progress,
|
||||
* wait.
|
||||
*
|
||||
* If an ioctl finishes before qemu_event_wait(), it will change
|
||||
* the event state to SET. This will prevent qemu_event_wait() from
|
||||
* blocking, but it's not a problem because if other ioctls are
|
||||
* still running the loop will iterate once more and reset the event
|
||||
* status to FREE so that it can wait properly.
|
||||
*
|
||||
* If an ioctls finishes while qemu_event_wait() is blocking, then
|
||||
* it will be waken up, but also here the while loop makes sure
|
||||
* to re-enter the wait if there are other running ioctls.
|
||||
*/
|
||||
qemu_event_wait(&accel_in_ioctl_event);
|
||||
} else {
|
||||
/* No ioctl is running */
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void accel_ioctl_inhibit_end(void)
|
||||
{
|
||||
CPUState *cpu;
|
||||
|
||||
qemu_lockcnt_unlock(&accel_in_ioctl_lock);
|
||||
CPU_FOREACH(cpu) {
|
||||
qemu_lockcnt_unlock(&cpu->in_ioctl_lock);
|
||||
}
|
||||
}
|
||||
|
@ -31,6 +31,7 @@
|
||||
#include "sysemu/kvm_int.h"
|
||||
#include "sysemu/runstate.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "sysemu/accel-blocker.h"
|
||||
#include "qemu/bswap.h"
|
||||
#include "exec/memory.h"
|
||||
#include "exec/ram_addr.h"
|
||||
@ -46,6 +47,7 @@
|
||||
#include "sysemu/hw_accel.h"
|
||||
#include "kvm-cpus.h"
|
||||
#include "sysemu/dirtylimit.h"
|
||||
#include "qemu/range.h"
|
||||
|
||||
#include "hw/boards.h"
|
||||
#include "monitor/stats.h"
|
||||
@ -1292,6 +1294,7 @@ void kvm_set_max_memslot_size(hwaddr max_slot_size)
|
||||
kvm_max_slot_size = max_slot_size;
|
||||
}
|
||||
|
||||
/* Called with KVMMemoryListener.slots_lock held */
|
||||
static void kvm_set_phys_mem(KVMMemoryListener *kml,
|
||||
MemoryRegionSection *section, bool add)
|
||||
{
|
||||
@ -1326,14 +1329,12 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
|
||||
ram = memory_region_get_ram_ptr(mr) + mr_offset;
|
||||
ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;
|
||||
|
||||
kvm_slots_lock();
|
||||
|
||||
if (!add) {
|
||||
do {
|
||||
slot_size = MIN(kvm_max_slot_size, size);
|
||||
mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
|
||||
if (!mem) {
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
|
||||
/*
|
||||
@ -1371,7 +1372,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
|
||||
start_addr += slot_size;
|
||||
size -= slot_size;
|
||||
} while (size);
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
/* register the new slot */
|
||||
@ -1396,9 +1397,6 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
|
||||
ram += slot_size;
|
||||
size -= slot_size;
|
||||
} while (size);
|
||||
|
||||
out:
|
||||
kvm_slots_unlock();
|
||||
}
|
||||
|
||||
static void *kvm_dirty_ring_reaper_thread(void *data)
|
||||
@ -1455,18 +1453,95 @@ static void kvm_region_add(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
|
||||
KVMMemoryUpdate *update;
|
||||
|
||||
memory_region_ref(section->mr);
|
||||
kvm_set_phys_mem(kml, section, true);
|
||||
update = g_new0(KVMMemoryUpdate, 1);
|
||||
update->section = *section;
|
||||
|
||||
QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next);
|
||||
}
|
||||
|
||||
static void kvm_region_del(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
|
||||
KVMMemoryUpdate *update;
|
||||
|
||||
kvm_set_phys_mem(kml, section, false);
|
||||
memory_region_unref(section->mr);
|
||||
update = g_new0(KVMMemoryUpdate, 1);
|
||||
update->section = *section;
|
||||
|
||||
QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next);
|
||||
}
|
||||
|
||||
static void kvm_region_commit(MemoryListener *listener)
|
||||
{
|
||||
KVMMemoryListener *kml = container_of(listener, KVMMemoryListener,
|
||||
listener);
|
||||
KVMMemoryUpdate *u1, *u2;
|
||||
bool need_inhibit = false;
|
||||
|
||||
if (QSIMPLEQ_EMPTY(&kml->transaction_add) &&
|
||||
QSIMPLEQ_EMPTY(&kml->transaction_del)) {
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* We have to be careful when regions to add overlap with ranges to remove.
|
||||
* We have to simulate atomic KVM memslot updates by making sure no ioctl()
|
||||
* is currently active.
|
||||
*
|
||||
* The lists are order by addresses, so it's easy to find overlaps.
|
||||
*/
|
||||
u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
|
||||
u2 = QSIMPLEQ_FIRST(&kml->transaction_add);
|
||||
while (u1 && u2) {
|
||||
Range r1, r2;
|
||||
|
||||
range_init_nofail(&r1, u1->section.offset_within_address_space,
|
||||
int128_get64(u1->section.size));
|
||||
range_init_nofail(&r2, u2->section.offset_within_address_space,
|
||||
int128_get64(u2->section.size));
|
||||
|
||||
if (range_overlaps_range(&r1, &r2)) {
|
||||
need_inhibit = true;
|
||||
break;
|
||||
}
|
||||
if (range_lob(&r1) < range_lob(&r2)) {
|
||||
u1 = QSIMPLEQ_NEXT(u1, next);
|
||||
} else {
|
||||
u2 = QSIMPLEQ_NEXT(u2, next);
|
||||
}
|
||||
}
|
||||
|
||||
kvm_slots_lock();
|
||||
if (need_inhibit) {
|
||||
accel_ioctl_inhibit_begin();
|
||||
}
|
||||
|
||||
/* Remove all memslots before adding the new ones. */
|
||||
while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) {
|
||||
u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
|
||||
QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next);
|
||||
|
||||
kvm_set_phys_mem(kml, &u1->section, false);
|
||||
memory_region_unref(u1->section.mr);
|
||||
|
||||
g_free(u1);
|
||||
}
|
||||
while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) {
|
||||
u1 = QSIMPLEQ_FIRST(&kml->transaction_add);
|
||||
QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next);
|
||||
|
||||
memory_region_ref(u1->section.mr);
|
||||
kvm_set_phys_mem(kml, &u1->section, true);
|
||||
|
||||
g_free(u1);
|
||||
}
|
||||
|
||||
if (need_inhibit) {
|
||||
accel_ioctl_inhibit_end();
|
||||
}
|
||||
kvm_slots_unlock();
|
||||
}
|
||||
|
||||
static void kvm_log_sync(MemoryListener *listener,
|
||||
@ -1610,8 +1685,12 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
|
||||
kml->slots[i].slot = i;
|
||||
}
|
||||
|
||||
QSIMPLEQ_INIT(&kml->transaction_add);
|
||||
QSIMPLEQ_INIT(&kml->transaction_del);
|
||||
|
||||
kml->listener.region_add = kvm_region_add;
|
||||
kml->listener.region_del = kvm_region_del;
|
||||
kml->listener.commit = kvm_region_commit;
|
||||
kml->listener.log_start = kvm_log_start;
|
||||
kml->listener.log_stop = kvm_log_stop;
|
||||
kml->listener.priority = 10;
|
||||
@ -2310,6 +2389,7 @@ static int kvm_init(MachineState *ms)
|
||||
assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());
|
||||
|
||||
s->sigmask_len = 8;
|
||||
accel_blocker_init();
|
||||
|
||||
#ifdef KVM_CAP_SET_GUEST_DEBUG
|
||||
QTAILQ_INIT(&s->kvm_sw_breakpoints);
|
||||
@ -3014,7 +3094,9 @@ int kvm_vm_ioctl(KVMState *s, int type, ...)
|
||||
va_end(ap);
|
||||
|
||||
trace_kvm_vm_ioctl(type, arg);
|
||||
accel_ioctl_begin();
|
||||
ret = ioctl(s->vmfd, type, arg);
|
||||
accel_ioctl_end();
|
||||
if (ret == -1) {
|
||||
ret = -errno;
|
||||
}
|
||||
@ -3032,7 +3114,9 @@ int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
|
||||
va_end(ap);
|
||||
|
||||
trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
|
||||
accel_cpu_ioctl_begin(cpu);
|
||||
ret = ioctl(cpu->kvm_fd, type, arg);
|
||||
accel_cpu_ioctl_end(cpu);
|
||||
if (ret == -1) {
|
||||
ret = -errno;
|
||||
}
|
||||
@ -3050,7 +3134,9 @@ int kvm_device_ioctl(int fd, int type, ...)
|
||||
va_end(ap);
|
||||
|
||||
trace_kvm_device_ioctl(fd, type, arg);
|
||||
accel_ioctl_begin();
|
||||
ret = ioctl(fd, type, arg);
|
||||
accel_ioctl_end();
|
||||
if (ret == -1) {
|
||||
ret = -errno;
|
||||
}
|
||||
@ -3586,7 +3672,6 @@ static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
|
||||
Error **errp)
|
||||
{
|
||||
KVMState *s = KVM_STATE(obj);
|
||||
Error *error = NULL;
|
||||
uint32_t value;
|
||||
|
||||
if (s->fd != -1) {
|
||||
@ -3594,9 +3679,7 @@ static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
|
||||
return;
|
||||
}
|
||||
|
||||
visit_type_uint32(v, name, &value, &error);
|
||||
if (error) {
|
||||
error_propagate(errp, error);
|
||||
if (!visit_type_uint32(v, name, &value, errp)) {
|
||||
return;
|
||||
}
|
||||
if (value & (value - 1)) {
|
||||
|
@ -1,4 +1,4 @@
|
||||
specific_ss.add(files('accel-common.c'))
|
||||
specific_ss.add(files('accel-common.c', 'accel-blocker.c'))
|
||||
softmmu_ss.add(files('accel-softmmu.c'))
|
||||
user_ss.add(files('accel-user.c'))
|
||||
|
||||
@ -11,10 +11,5 @@ if have_system
|
||||
subdir('stubs')
|
||||
endif
|
||||
|
||||
dummy_ss = ss.source_set()
|
||||
dummy_ss.add(files(
|
||||
'dummy-cpus.c',
|
||||
))
|
||||
|
||||
specific_ss.add_all(when: ['CONFIG_SOFTMMU'], if_true: dummy_ss)
|
||||
specific_ss.add_all(when: ['CONFIG_XEN'], if_true: dummy_ss)
|
||||
# qtest
|
||||
softmmu_ss.add(files('dummy-cpus.c'))
|
||||
|
@ -33,7 +33,7 @@
|
||||
#include "qemu/atomic.h"
|
||||
#include "qemu/atomic128.h"
|
||||
#include "exec/translate-all.h"
|
||||
#include "trace/trace-root.h"
|
||||
#include "trace.h"
|
||||
#include "tb-hash.h"
|
||||
#include "internal.h"
|
||||
#ifdef CONFIG_PLUGIN
|
||||
@ -1356,7 +1356,6 @@ static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
|
||||
MemoryRegionSection *section;
|
||||
MemoryRegion *mr;
|
||||
uint64_t val;
|
||||
bool locked = false;
|
||||
MemTxResult r;
|
||||
|
||||
section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
|
||||
@ -1367,11 +1366,11 @@ static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
|
||||
cpu_io_recompile(cpu, retaddr);
|
||||
}
|
||||
|
||||
if (!qemu_mutex_iothread_locked()) {
|
||||
qemu_mutex_lock_iothread();
|
||||
locked = true;
|
||||
}
|
||||
{
|
||||
QEMU_IOTHREAD_LOCK_GUARD();
|
||||
r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
|
||||
}
|
||||
|
||||
if (r != MEMTX_OK) {
|
||||
hwaddr physaddr = mr_offset +
|
||||
section->offset_within_address_space -
|
||||
@ -1380,10 +1379,6 @@ static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
|
||||
cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
|
||||
mmu_idx, full->attrs, r, retaddr);
|
||||
}
|
||||
if (locked) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
}
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
@ -1410,7 +1405,6 @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
|
||||
hwaddr mr_offset;
|
||||
MemoryRegionSection *section;
|
||||
MemoryRegion *mr;
|
||||
bool locked = false;
|
||||
MemTxResult r;
|
||||
|
||||
section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
|
||||
@ -1427,11 +1421,11 @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
|
||||
*/
|
||||
save_iotlb_data(cpu, section, mr_offset);
|
||||
|
||||
if (!qemu_mutex_iothread_locked()) {
|
||||
qemu_mutex_lock_iothread();
|
||||
locked = true;
|
||||
}
|
||||
{
|
||||
QEMU_IOTHREAD_LOCK_GUARD();
|
||||
r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
|
||||
}
|
||||
|
||||
if (r != MEMTX_OK) {
|
||||
hwaddr physaddr = mr_offset +
|
||||
section->offset_within_address_space -
|
||||
@ -1441,9 +1435,6 @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
|
||||
MMU_DATA_STORE, mmu_idx, full->attrs, r,
|
||||
retaddr);
|
||||
}
|
||||
if (locked) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
}
|
||||
}
|
||||
|
||||
static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
|
||||
@ -1508,10 +1499,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
|
||||
trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
|
||||
|
||||
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
|
||||
struct page_collection *pages
|
||||
= page_collection_lock(ram_addr, ram_addr + size);
|
||||
tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
|
||||
page_collection_unlock(pages);
|
||||
tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -23,83 +23,28 @@
|
||||
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
|
||||
#endif
|
||||
|
||||
typedef struct PageDesc {
|
||||
/* list of TBs intersecting this ram page */
|
||||
uintptr_t first_tb;
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
unsigned long flags;
|
||||
void *target_data;
|
||||
#endif
|
||||
#ifdef CONFIG_SOFTMMU
|
||||
QemuSpin lock;
|
||||
#endif
|
||||
} PageDesc;
|
||||
|
||||
/* Size of the L2 (and L3, etc) page tables. */
|
||||
#define V_L2_BITS 10
|
||||
#define V_L2_SIZE (1 << V_L2_BITS)
|
||||
|
||||
/*
|
||||
* L1 Mapping properties
|
||||
*/
|
||||
extern int v_l1_size;
|
||||
extern int v_l1_shift;
|
||||
extern int v_l2_levels;
|
||||
|
||||
/*
|
||||
* The bottom level has pointers to PageDesc, and is indexed by
|
||||
* anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
|
||||
*/
|
||||
#define V_L1_MIN_BITS 4
|
||||
#define V_L1_MAX_BITS (V_L2_BITS + 3)
|
||||
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
|
||||
|
||||
extern void *l1_map[V_L1_MAX_SIZE];
|
||||
|
||||
PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc);
|
||||
|
||||
static inline PageDesc *page_find(tb_page_addr_t index)
|
||||
{
|
||||
return page_find_alloc(index, false);
|
||||
}
|
||||
|
||||
/* list iterators for lists of tagged pointers in TranslationBlock */
|
||||
#define TB_FOR_EACH_TAGGED(head, tb, n, field) \
|
||||
for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \
|
||||
tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
|
||||
tb = (TranslationBlock *)((uintptr_t)tb & ~1))
|
||||
|
||||
#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \
|
||||
TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
|
||||
|
||||
#define TB_FOR_EACH_JMP(head_tb, tb, n) \
|
||||
TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
|
||||
|
||||
/* In user-mode page locks aren't used; mmap_lock is enough */
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
|
||||
static inline void page_lock(PageDesc *pd) { }
|
||||
static inline void page_unlock(PageDesc *pd) { }
|
||||
#else
|
||||
#ifdef CONFIG_DEBUG_TCG
|
||||
void do_assert_page_locked(const PageDesc *pd, const char *file, int line);
|
||||
#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
|
||||
#else
|
||||
#define assert_page_locked(pd)
|
||||
#endif
|
||||
void page_lock(PageDesc *pd);
|
||||
void page_unlock(PageDesc *pd);
|
||||
#endif
|
||||
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
|
||||
#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
|
||||
void assert_no_pages_locked(void);
|
||||
#else
|
||||
static inline void assert_no_pages_locked(void) { }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
static inline void page_table_config_init(void) { }
|
||||
#else
|
||||
void page_table_config_init(void);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SOFTMMU
|
||||
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
|
||||
unsigned size,
|
||||
uintptr_t retaddr);
|
||||
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
|
||||
#endif /* CONFIG_SOFTMMU */
|
||||
|
||||
TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
|
||||
target_ulong cs_base, uint32_t flags,
|
||||
int cflags);
|
||||
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
|
||||
void page_init(void);
|
||||
void tb_htable_init(void);
|
||||
void tb_reset_jump(TranslationBlock *tb, int n);
|
||||
|
@ -258,10 +258,13 @@ static TCGOp *rm_ops(TCGOp *op)
|
||||
|
||||
static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op)
|
||||
{
|
||||
*begin_op = QTAILQ_NEXT(*begin_op, link);
|
||||
tcg_debug_assert(*begin_op);
|
||||
op = tcg_op_insert_after(tcg_ctx, op, (*begin_op)->opc);
|
||||
memcpy(op->args, (*begin_op)->args, sizeof(op->args));
|
||||
TCGOp *old_op = QTAILQ_NEXT(*begin_op, link);
|
||||
unsigned nargs = old_op->nargs;
|
||||
|
||||
*begin_op = old_op;
|
||||
op = tcg_op_insert_after(tcg_ctx, op, old_op->opc, nargs);
|
||||
memcpy(op->args, old_op->args, sizeof(op->args[0]) * nargs);
|
||||
|
||||
return op;
|
||||
}
|
||||
|
||||
@ -381,32 +384,23 @@ static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
|
||||
static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
|
||||
void *func, int *cb_idx)
|
||||
{
|
||||
TCGOp *old_op;
|
||||
int func_idx;
|
||||
|
||||
/* copy all ops until the call */
|
||||
do {
|
||||
op = copy_op_nocheck(begin_op, op);
|
||||
} while (op->opc != INDEX_op_call);
|
||||
|
||||
/* fill in the op call */
|
||||
op->param1 = (*begin_op)->param1;
|
||||
op->param2 = (*begin_op)->param2;
|
||||
old_op = *begin_op;
|
||||
TCGOP_CALLI(op) = TCGOP_CALLI(old_op);
|
||||
TCGOP_CALLO(op) = TCGOP_CALLO(old_op);
|
||||
tcg_debug_assert(op->life == 0);
|
||||
if (*cb_idx == -1) {
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Instead of working out the position of the callback in args[], just
|
||||
* look for @empty_func, since it should be a unique pointer.
|
||||
*/
|
||||
for (i = 0; i < MAX_OPC_PARAM_ARGS; i++) {
|
||||
if ((uintptr_t)(*begin_op)->args[i] == (uintptr_t)empty_func) {
|
||||
*cb_idx = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
tcg_debug_assert(i < MAX_OPC_PARAM_ARGS);
|
||||
}
|
||||
op->args[*cb_idx] = (uintptr_t)func;
|
||||
op->args[*cb_idx + 1] = (*begin_op)->args[*cb_idx + 1];
|
||||
func_idx = TCGOP_CALLO(op) + TCGOP_CALLI(op);
|
||||
*cb_idx = func_idx;
|
||||
op->args[func_idx] = (uintptr_t)func;
|
||||
|
||||
return op;
|
||||
}
|
||||
@ -424,11 +418,11 @@ static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
|
||||
op = copy_const_ptr(&begin_op, op, cb->userp);
|
||||
|
||||
/* copy the ld_i32, but note that we only have to copy it once */
|
||||
if (*cb_idx == -1) {
|
||||
op = copy_op(&begin_op, op, INDEX_op_ld_i32);
|
||||
} else {
|
||||
begin_op = QTAILQ_NEXT(begin_op, link);
|
||||
tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
|
||||
if (*cb_idx == -1) {
|
||||
op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32);
|
||||
memcpy(op->args, begin_op->args, sizeof(op->args));
|
||||
}
|
||||
|
||||
/* call */
|
||||
@ -471,11 +465,11 @@ static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
|
||||
op = copy_const_ptr(&begin_op, op, cb->userp);
|
||||
|
||||
/* copy the ld_i32, but note that we only have to copy it once */
|
||||
if (*cb_idx == -1) {
|
||||
op = copy_op(&begin_op, op, INDEX_op_ld_i32);
|
||||
} else {
|
||||
begin_op = QTAILQ_NEXT(begin_op, link);
|
||||
tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
|
||||
if (*cb_idx == -1) {
|
||||
op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32);
|
||||
memcpy(op->args, begin_op->args, sizeof(op->args));
|
||||
}
|
||||
|
||||
/* extu_tl_i64 */
|
||||
|
1000
accel/tcg/tb-maint.c
1000
accel/tcg/tb-maint.c
File diff suppressed because it is too large
Load Diff
@ -6,5 +6,9 @@ exec_tb(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR
|
||||
exec_tb_nocache(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR
|
||||
exec_tb_exit(void *last_tb, unsigned int flags) "tb:%p flags=0x%x"
|
||||
|
||||
# cputlb.c
|
||||
memory_notdirty_write_access(uint64_t vaddr, uint64_t ram_addr, unsigned size) "0x%" PRIx64 " ram_addr 0x%" PRIx64 " size %u"
|
||||
memory_notdirty_set_dirty(uint64_t vaddr) "0x%" PRIx64
|
||||
|
||||
# translate-all.c
|
||||
translate_block(void *tb, uintptr_t pc, const void *tb_code) "tb:%p, pc:0x%"PRIxPTR", tb_code:%p"
|
||||
|
@ -670,105 +670,13 @@ void libafl_add_backdoor_hook(void (*exec)(target_ulong id, uint64_t data),
|
||||
|
||||
//// --- End LibAFL code ---
|
||||
|
||||
/* make various TB consistency checks */
|
||||
|
||||
/**
|
||||
* struct page_entry - page descriptor entry
|
||||
* @pd: pointer to the &struct PageDesc of the page this entry represents
|
||||
* @index: page index of the page
|
||||
* @locked: whether the page is locked
|
||||
*
|
||||
* This struct helps us keep track of the locked state of a page, without
|
||||
* bloating &struct PageDesc.
|
||||
*
|
||||
* A page lock protects accesses to all fields of &struct PageDesc.
|
||||
*
|
||||
* See also: &struct page_collection.
|
||||
*/
|
||||
struct page_entry {
|
||||
PageDesc *pd;
|
||||
tb_page_addr_t index;
|
||||
bool locked;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
|
||||
* @tree: Binary search tree (BST) of the pages, with key == page index
|
||||
* @max: Pointer to the page in @tree with the highest page index
|
||||
*
|
||||
* To avoid deadlock we lock pages in ascending order of page index.
|
||||
* When operating on a set of pages, we need to keep track of them so that
|
||||
* we can lock them in order and also unlock them later. For this we collect
|
||||
* pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
|
||||
* @tree implementation we use does not provide an O(1) operation to obtain the
|
||||
* highest-ranked element, we use @max to keep track of the inserted page
|
||||
* with the highest index. This is valuable because if a page is not in
|
||||
* the tree and its index is higher than @max's, then we can lock it
|
||||
* without breaking the locking order rule.
|
||||
*
|
||||
* Note on naming: 'struct page_set' would be shorter, but we already have a few
|
||||
* page_set_*() helpers, so page_collection is used instead to avoid confusion.
|
||||
*
|
||||
* See also: page_collection_lock().
|
||||
*/
|
||||
struct page_collection {
|
||||
GTree *tree;
|
||||
struct page_entry *max;
|
||||
};
|
||||
|
||||
/*
|
||||
* In system mode we want L1_MAP to be based on ram offsets,
|
||||
* while in user mode we want it to be based on virtual addresses.
|
||||
*
|
||||
* TODO: For user mode, see the caveat re host vs guest virtual
|
||||
* address spaces near GUEST_ADDR_MAX.
|
||||
*/
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
|
||||
# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
|
||||
#else
|
||||
# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
|
||||
#endif
|
||||
#else
|
||||
# define L1_MAP_ADDR_SPACE_BITS MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
|
||||
#endif
|
||||
|
||||
/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
|
||||
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
|
||||
sizeof_field(TranslationBlock, trace_vcpu_dstate)
|
||||
* BITS_PER_BYTE);
|
||||
|
||||
/*
|
||||
* L1 Mapping properties
|
||||
*/
|
||||
int v_l1_size;
|
||||
int v_l1_shift;
|
||||
int v_l2_levels;
|
||||
|
||||
void *l1_map[V_L1_MAX_SIZE];
|
||||
|
||||
TBContext tb_ctx;
|
||||
|
||||
static void page_table_config_init(void)
|
||||
{
|
||||
uint32_t v_l1_bits;
|
||||
|
||||
assert(TARGET_PAGE_BITS);
|
||||
/* The bits remaining after N lower levels of page tables. */
|
||||
v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
|
||||
if (v_l1_bits < V_L1_MIN_BITS) {
|
||||
v_l1_bits += V_L2_BITS;
|
||||
}
|
||||
|
||||
v_l1_size = 1 << v_l1_bits;
|
||||
v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
|
||||
v_l2_levels = v_l1_shift / V_L2_BITS - 1;
|
||||
|
||||
assert(v_l1_bits <= V_L1_MAX_BITS);
|
||||
assert(v_l1_shift % V_L2_BITS == 0);
|
||||
assert(v_l2_levels >= 0);
|
||||
}
|
||||
|
||||
/* Encode VAL as a signed leb128 sequence at P.
|
||||
Return P incremented past the encoded value. */
|
||||
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
|
||||
@ -961,393 +869,7 @@ void page_init(void)
|
||||
{
|
||||
page_size_init();
|
||||
page_table_config_init();
|
||||
|
||||
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
|
||||
{
|
||||
#ifdef HAVE_KINFO_GETVMMAP
|
||||
struct kinfo_vmentry *freep;
|
||||
int i, cnt;
|
||||
|
||||
freep = kinfo_getvmmap(getpid(), &cnt);
|
||||
if (freep) {
|
||||
mmap_lock();
|
||||
for (i = 0; i < cnt; i++) {
|
||||
unsigned long startaddr, endaddr;
|
||||
|
||||
startaddr = freep[i].kve_start;
|
||||
endaddr = freep[i].kve_end;
|
||||
if (h2g_valid(startaddr)) {
|
||||
startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
|
||||
|
||||
if (h2g_valid(endaddr)) {
|
||||
endaddr = h2g(endaddr);
|
||||
page_set_flags(startaddr, endaddr, PAGE_RESERVED);
|
||||
} else {
|
||||
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
|
||||
endaddr = ~0ul;
|
||||
page_set_flags(startaddr, endaddr, PAGE_RESERVED);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
free(freep);
|
||||
mmap_unlock();
|
||||
}
|
||||
#else
|
||||
FILE *f;
|
||||
|
||||
last_brk = (unsigned long)sbrk(0);
|
||||
|
||||
f = fopen("/compat/linux/proc/self/maps", "r");
|
||||
if (f) {
|
||||
mmap_lock();
|
||||
|
||||
do {
|
||||
unsigned long startaddr, endaddr;
|
||||
int n;
|
||||
|
||||
n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
|
||||
|
||||
if (n == 2 && h2g_valid(startaddr)) {
|
||||
startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
|
||||
|
||||
if (h2g_valid(endaddr)) {
|
||||
endaddr = h2g(endaddr);
|
||||
} else {
|
||||
endaddr = ~0ul;
|
||||
}
|
||||
page_set_flags(startaddr, endaddr, PAGE_RESERVED);
|
||||
}
|
||||
} while (!feof(f));
|
||||
|
||||
fclose(f);
|
||||
mmap_unlock();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
|
||||
{
|
||||
PageDesc *pd;
|
||||
void **lp;
|
||||
int i;
|
||||
|
||||
/* Level 1. Always allocated. */
|
||||
lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
|
||||
|
||||
/* Level 2..N-1. */
|
||||
for (i = v_l2_levels; i > 0; i--) {
|
||||
void **p = qatomic_rcu_read(lp);
|
||||
|
||||
if (p == NULL) {
|
||||
void *existing;
|
||||
|
||||
if (!alloc) {
|
||||
return NULL;
|
||||
}
|
||||
p = g_new0(void *, V_L2_SIZE);
|
||||
existing = qatomic_cmpxchg(lp, NULL, p);
|
||||
if (unlikely(existing)) {
|
||||
g_free(p);
|
||||
p = existing;
|
||||
}
|
||||
}
|
||||
|
||||
lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
|
||||
}
|
||||
|
||||
pd = qatomic_rcu_read(lp);
|
||||
if (pd == NULL) {
|
||||
void *existing;
|
||||
|
||||
if (!alloc) {
|
||||
return NULL;
|
||||
}
|
||||
pd = g_new0(PageDesc, V_L2_SIZE);
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < V_L2_SIZE; i++) {
|
||||
qemu_spin_init(&pd[i].lock);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
existing = qatomic_cmpxchg(lp, NULL, pd);
|
||||
if (unlikely(existing)) {
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < V_L2_SIZE; i++) {
|
||||
qemu_spin_destroy(&pd[i].lock);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
g_free(pd);
|
||||
pd = existing;
|
||||
}
|
||||
}
|
||||
|
||||
return pd + (index & (V_L2_SIZE - 1));
|
||||
}
|
||||
|
||||
/* In user-mode page locks aren't used; mmap_lock is enough */
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
struct page_collection *
|
||||
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void page_collection_unlock(struct page_collection *set)
|
||||
{ }
|
||||
#else /* !CONFIG_USER_ONLY */
|
||||
|
||||
#ifdef CONFIG_DEBUG_TCG
|
||||
|
||||
static __thread GHashTable *ht_pages_locked_debug;
|
||||
|
||||
static void ht_pages_locked_debug_init(void)
|
||||
{
|
||||
if (ht_pages_locked_debug) {
|
||||
return;
|
||||
}
|
||||
ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
|
||||
}
|
||||
|
||||
static bool page_is_locked(const PageDesc *pd)
|
||||
{
|
||||
PageDesc *found;
|
||||
|
||||
ht_pages_locked_debug_init();
|
||||
found = g_hash_table_lookup(ht_pages_locked_debug, pd);
|
||||
return !!found;
|
||||
}
|
||||
|
||||
static void page_lock__debug(PageDesc *pd)
|
||||
{
|
||||
ht_pages_locked_debug_init();
|
||||
g_assert(!page_is_locked(pd));
|
||||
g_hash_table_insert(ht_pages_locked_debug, pd, pd);
|
||||
}
|
||||
|
||||
static void page_unlock__debug(const PageDesc *pd)
|
||||
{
|
||||
bool removed;
|
||||
|
||||
ht_pages_locked_debug_init();
|
||||
g_assert(page_is_locked(pd));
|
||||
removed = g_hash_table_remove(ht_pages_locked_debug, pd);
|
||||
g_assert(removed);
|
||||
}
|
||||
|
||||
void do_assert_page_locked(const PageDesc *pd, const char *file, int line)
|
||||
{
|
||||
if (unlikely(!page_is_locked(pd))) {
|
||||
error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
|
||||
pd, file, line);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
void assert_no_pages_locked(void)
|
||||
{
|
||||
ht_pages_locked_debug_init();
|
||||
g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
|
||||
}
|
||||
|
||||
#else /* !CONFIG_DEBUG_TCG */
|
||||
|
||||
static inline void page_lock__debug(const PageDesc *pd) { }
|
||||
static inline void page_unlock__debug(const PageDesc *pd) { }
|
||||
|
||||
#endif /* CONFIG_DEBUG_TCG */
|
||||
|
||||
void page_lock(PageDesc *pd)
|
||||
{
|
||||
page_lock__debug(pd);
|
||||
qemu_spin_lock(&pd->lock);
|
||||
}
|
||||
|
||||
void page_unlock(PageDesc *pd)
|
||||
{
|
||||
qemu_spin_unlock(&pd->lock);
|
||||
page_unlock__debug(pd);
|
||||
}
|
||||
|
||||
static inline struct page_entry *
|
||||
page_entry_new(PageDesc *pd, tb_page_addr_t index)
|
||||
{
|
||||
struct page_entry *pe = g_malloc(sizeof(*pe));
|
||||
|
||||
pe->index = index;
|
||||
pe->pd = pd;
|
||||
pe->locked = false;
|
||||
return pe;
|
||||
}
|
||||
|
||||
static void page_entry_destroy(gpointer p)
|
||||
{
|
||||
struct page_entry *pe = p;
|
||||
|
||||
g_assert(pe->locked);
|
||||
page_unlock(pe->pd);
|
||||
g_free(pe);
|
||||
}
|
||||
|
||||
/* returns false on success */
|
||||
static bool page_entry_trylock(struct page_entry *pe)
|
||||
{
|
||||
bool busy;
|
||||
|
||||
busy = qemu_spin_trylock(&pe->pd->lock);
|
||||
if (!busy) {
|
||||
g_assert(!pe->locked);
|
||||
pe->locked = true;
|
||||
page_lock__debug(pe->pd);
|
||||
}
|
||||
return busy;
|
||||
}
|
||||
|
||||
static void do_page_entry_lock(struct page_entry *pe)
|
||||
{
|
||||
page_lock(pe->pd);
|
||||
g_assert(!pe->locked);
|
||||
pe->locked = true;
|
||||
}
|
||||
|
||||
static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
|
||||
{
|
||||
struct page_entry *pe = value;
|
||||
|
||||
do_page_entry_lock(pe);
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
|
||||
{
|
||||
struct page_entry *pe = value;
|
||||
|
||||
if (pe->locked) {
|
||||
pe->locked = false;
|
||||
page_unlock(pe->pd);
|
||||
}
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Trylock a page, and if successful, add the page to a collection.
|
||||
* Returns true ("busy") if the page could not be locked; false otherwise.
|
||||
*/
|
||||
static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
|
||||
{
|
||||
tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
|
||||
struct page_entry *pe;
|
||||
PageDesc *pd;
|
||||
|
||||
pe = g_tree_lookup(set->tree, &index);
|
||||
if (pe) {
|
||||
return false;
|
||||
}
|
||||
|
||||
pd = page_find(index);
|
||||
if (pd == NULL) {
|
||||
return false;
|
||||
}
|
||||
|
||||
pe = page_entry_new(pd, index);
|
||||
g_tree_insert(set->tree, &pe->index, pe);
|
||||
|
||||
/*
|
||||
* If this is either (1) the first insertion or (2) a page whose index
|
||||
* is higher than any other so far, just lock the page and move on.
|
||||
*/
|
||||
if (set->max == NULL || pe->index > set->max->index) {
|
||||
set->max = pe;
|
||||
do_page_entry_lock(pe);
|
||||
return false;
|
||||
}
|
||||
/*
|
||||
* Try to acquire out-of-order lock; if busy, return busy so that we acquire
|
||||
* locks in order.
|
||||
*/
|
||||
return page_entry_trylock(pe);
|
||||
}
|
||||
|
||||
static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
|
||||
{
|
||||
tb_page_addr_t a = *(const tb_page_addr_t *)ap;
|
||||
tb_page_addr_t b = *(const tb_page_addr_t *)bp;
|
||||
|
||||
if (a == b) {
|
||||
return 0;
|
||||
} else if (a < b) {
|
||||
return -1;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Lock a range of pages ([@start,@end[) as well as the pages of all
|
||||
* intersecting TBs.
|
||||
* Locking order: acquire locks in ascending order of page index.
|
||||
*/
|
||||
struct page_collection *
|
||||
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
|
||||
{
|
||||
struct page_collection *set = g_malloc(sizeof(*set));
|
||||
tb_page_addr_t index;
|
||||
PageDesc *pd;
|
||||
|
||||
start >>= TARGET_PAGE_BITS;
|
||||
end >>= TARGET_PAGE_BITS;
|
||||
g_assert(start <= end);
|
||||
|
||||
set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
|
||||
page_entry_destroy);
|
||||
set->max = NULL;
|
||||
assert_no_pages_locked();
|
||||
|
||||
retry:
|
||||
g_tree_foreach(set->tree, page_entry_lock, NULL);
|
||||
|
||||
for (index = start; index <= end; index++) {
|
||||
TranslationBlock *tb;
|
||||
int n;
|
||||
|
||||
pd = page_find(index);
|
||||
if (pd == NULL) {
|
||||
continue;
|
||||
}
|
||||
if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
|
||||
g_tree_foreach(set->tree, page_entry_unlock, NULL);
|
||||
goto retry;
|
||||
}
|
||||
assert_page_locked(pd);
|
||||
PAGE_FOR_EACH_TB(pd, tb, n) {
|
||||
if (page_trylock_add(set, tb_page_addr0(tb)) ||
|
||||
(tb_page_addr1(tb) != -1 &&
|
||||
page_trylock_add(set, tb_page_addr1(tb)))) {
|
||||
/* drop all locks, and reacquire in order */
|
||||
g_tree_foreach(set->tree, page_entry_unlock, NULL);
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
}
|
||||
return set;
|
||||
}
|
||||
|
||||
void page_collection_unlock(struct page_collection *set)
|
||||
{
|
||||
/* entries are unlocked and freed via page_entry_destroy */
|
||||
g_tree_destroy(set->tree);
|
||||
g_free(set);
|
||||
}
|
||||
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
|
||||
/*
|
||||
* Isolate the portion of code gen which can setjmp/longjmp.
|
||||
@ -2054,339 +1576,6 @@ void cpu_interrupt(CPUState *cpu, int mask)
|
||||
qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Walks guest process memory "regions" one by one
|
||||
* and calls callback function 'fn' for each region.
|
||||
*/
|
||||
struct walk_memory_regions_data {
|
||||
walk_memory_regions_fn fn;
|
||||
void *priv;
|
||||
target_ulong start;
|
||||
int prot;
|
||||
};
|
||||
|
||||
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
|
||||
target_ulong end, int new_prot)
|
||||
{
|
||||
if (data->start != -1u) {
|
||||
int rc = data->fn(data->priv, data->start, end, data->prot);
|
||||
if (rc != 0) {
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
|
||||
data->start = (new_prot ? end : -1u);
|
||||
data->prot = new_prot;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
|
||||
target_ulong base, int level, void **lp)
|
||||
{
|
||||
target_ulong pa;
|
||||
int i, rc;
|
||||
|
||||
if (*lp == NULL) {
|
||||
return walk_memory_regions_end(data, base, 0);
|
||||
}
|
||||
|
||||
if (level == 0) {
|
||||
PageDesc *pd = *lp;
|
||||
|
||||
for (i = 0; i < V_L2_SIZE; ++i) {
|
||||
int prot = pd[i].flags;
|
||||
|
||||
pa = base | (i << TARGET_PAGE_BITS);
|
||||
if (prot != data->prot) {
|
||||
rc = walk_memory_regions_end(data, pa, prot);
|
||||
if (rc != 0) {
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
void **pp = *lp;
|
||||
|
||||
for (i = 0; i < V_L2_SIZE; ++i) {
|
||||
pa = base | ((target_ulong)i <<
|
||||
(TARGET_PAGE_BITS + V_L2_BITS * level));
|
||||
rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
|
||||
if (rc != 0) {
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
|
||||
{
|
||||
struct walk_memory_regions_data data;
|
||||
uintptr_t i, l1_sz = v_l1_size;
|
||||
|
||||
data.fn = fn;
|
||||
data.priv = priv;
|
||||
data.start = -1u;
|
||||
data.prot = 0;
|
||||
|
||||
for (i = 0; i < l1_sz; i++) {
|
||||
target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
|
||||
int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
|
||||
if (rc != 0) {
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
|
||||
return walk_memory_regions_end(&data, 0, 0);
|
||||
}
|
||||
|
||||
static int dump_region(void *priv, target_ulong start,
|
||||
target_ulong end, unsigned long prot)
|
||||
{
|
||||
FILE *f = (FILE *)priv;
|
||||
|
||||
(void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
|
||||
" "TARGET_FMT_lx" %c%c%c\n",
|
||||
start, end, end - start,
|
||||
((prot & PAGE_READ) ? 'r' : '-'),
|
||||
((prot & PAGE_WRITE) ? 'w' : '-'),
|
||||
((prot & PAGE_EXEC) ? 'x' : '-'));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* dump memory mappings */
|
||||
void page_dump(FILE *f)
|
||||
{
|
||||
const int length = sizeof(target_ulong) * 2;
|
||||
(void) fprintf(f, "%-*s %-*s %-*s %s\n",
|
||||
length, "start", length, "end", length, "size", "prot");
|
||||
walk_memory_regions(f, dump_region);
|
||||
}
|
||||
|
||||
int page_get_flags(target_ulong address)
|
||||
{
|
||||
PageDesc *p;
|
||||
|
||||
p = page_find(address >> TARGET_PAGE_BITS);
|
||||
if (!p) {
|
||||
return 0;
|
||||
}
|
||||
return p->flags;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allow the target to decide if PAGE_TARGET_[12] may be reset.
|
||||
* By default, they are not kept.
|
||||
*/
|
||||
#ifndef PAGE_TARGET_STICKY
|
||||
#define PAGE_TARGET_STICKY 0
|
||||
#endif
|
||||
#define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
|
||||
|
||||
/* Modify the flags of a page and invalidate the code if necessary.
|
||||
The flag PAGE_WRITE_ORG is positioned automatically depending
|
||||
on PAGE_WRITE. The mmap_lock should already be held. */
|
||||
void page_set_flags(target_ulong start, target_ulong end, int flags)
|
||||
{
|
||||
target_ulong addr, len;
|
||||
bool reset, inval_tb = false;
|
||||
|
||||
/* This function should never be called with addresses outside the
|
||||
guest address space. If this assert fires, it probably indicates
|
||||
a missing call to h2g_valid. */
|
||||
assert(end - 1 <= GUEST_ADDR_MAX);
|
||||
assert(start < end);
|
||||
/* Only set PAGE_ANON with new mappings. */
|
||||
assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
|
||||
assert_memory_lock();
|
||||
|
||||
start = start & TARGET_PAGE_MASK;
|
||||
end = TARGET_PAGE_ALIGN(end);
|
||||
|
||||
if (flags & PAGE_WRITE) {
|
||||
flags |= PAGE_WRITE_ORG;
|
||||
}
|
||||
reset = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
|
||||
if (reset) {
|
||||
page_reset_target_data(start, end);
|
||||
}
|
||||
flags &= ~PAGE_RESET;
|
||||
|
||||
for (addr = start, len = end - start;
|
||||
len != 0;
|
||||
len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
|
||||
PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, true);
|
||||
|
||||
/*
|
||||
* If the page was executable, but is reset, or is no longer
|
||||
* executable, or has become writable, then invalidate any code.
|
||||
*/
|
||||
if ((p->flags & PAGE_EXEC)
|
||||
&& (reset ||
|
||||
!(flags & PAGE_EXEC) ||
|
||||
(flags & ~p->flags & PAGE_WRITE))) {
|
||||
inval_tb = true;
|
||||
}
|
||||
/* Using mprotect on a page does not change sticky bits. */
|
||||
p->flags = (reset ? 0 : p->flags & PAGE_STICKY) | flags;
|
||||
}
|
||||
|
||||
if (inval_tb) {
|
||||
tb_invalidate_phys_range(start, end);
|
||||
}
|
||||
}
|
||||
|
||||
int page_check_range(target_ulong start, target_ulong len, int flags)
|
||||
{
|
||||
PageDesc *p;
|
||||
target_ulong end;
|
||||
target_ulong addr;
|
||||
|
||||
/* This function should never be called with addresses outside the
|
||||
guest address space. If this assert fires, it probably indicates
|
||||
a missing call to h2g_valid. */
|
||||
if (TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS) {
|
||||
assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
|
||||
}
|
||||
|
||||
if (len == 0) {
|
||||
return 0;
|
||||
}
|
||||
if (start + len - 1 < start) {
|
||||
/* We've wrapped around. */
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* must do before we loose bits in the next step */
|
||||
end = TARGET_PAGE_ALIGN(start + len);
|
||||
start = start & TARGET_PAGE_MASK;
|
||||
|
||||
for (addr = start, len = end - start;
|
||||
len != 0;
|
||||
len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
|
||||
p = page_find(addr >> TARGET_PAGE_BITS);
|
||||
if (!p) {
|
||||
return -1;
|
||||
}
|
||||
if (!(p->flags & PAGE_VALID)) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
|
||||
return -1;
|
||||
}
|
||||
if (flags & PAGE_WRITE) {
|
||||
if (!(p->flags & PAGE_WRITE_ORG)) {
|
||||
return -1;
|
||||
}
|
||||
/* unprotect the page if it was put read-only because it
|
||||
contains translated code */
|
||||
if (!(p->flags & PAGE_WRITE)) {
|
||||
if (!page_unprotect(addr, 0)) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void page_protect(tb_page_addr_t page_addr)
|
||||
{
|
||||
target_ulong addr;
|
||||
PageDesc *p;
|
||||
int prot;
|
||||
|
||||
p = page_find(page_addr >> TARGET_PAGE_BITS);
|
||||
if (p && (p->flags & PAGE_WRITE)) {
|
||||
/*
|
||||
* Force the host page as non writable (writes will have a page fault +
|
||||
* mprotect overhead).
|
||||
*/
|
||||
page_addr &= qemu_host_page_mask;
|
||||
prot = 0;
|
||||
for (addr = page_addr; addr < page_addr + qemu_host_page_size;
|
||||
addr += TARGET_PAGE_SIZE) {
|
||||
|
||||
p = page_find(addr >> TARGET_PAGE_BITS);
|
||||
if (!p) {
|
||||
continue;
|
||||
}
|
||||
prot |= p->flags;
|
||||
p->flags &= ~PAGE_WRITE;
|
||||
}
|
||||
mprotect(g2h_untagged(page_addr), qemu_host_page_size,
|
||||
(prot & PAGE_BITS) & ~PAGE_WRITE);
|
||||
}
|
||||
}
|
||||
|
||||
/* called from signal handler: invalidate the code and unprotect the
|
||||
* page. Return 0 if the fault was not handled, 1 if it was handled,
|
||||
* and 2 if it was handled but the caller must cause the TB to be
|
||||
* immediately exited. (We can only return 2 if the 'pc' argument is
|
||||
* non-zero.)
|
||||
*/
|
||||
int page_unprotect(target_ulong address, uintptr_t pc)
|
||||
{
|
||||
unsigned int prot;
|
||||
bool current_tb_invalidated;
|
||||
PageDesc *p;
|
||||
target_ulong host_start, host_end, addr;
|
||||
|
||||
/* Technically this isn't safe inside a signal handler. However we
|
||||
know this only ever happens in a synchronous SEGV handler, so in
|
||||
practice it seems to be ok. */
|
||||
mmap_lock();
|
||||
|
||||
p = page_find(address >> TARGET_PAGE_BITS);
|
||||
if (!p) {
|
||||
mmap_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* if the page was really writable, then we change its
|
||||
protection back to writable */
|
||||
if (p->flags & PAGE_WRITE_ORG) {
|
||||
current_tb_invalidated = false;
|
||||
if (p->flags & PAGE_WRITE) {
|
||||
/* If the page is actually marked WRITE then assume this is because
|
||||
* this thread raced with another one which got here first and
|
||||
* set the page to PAGE_WRITE and did the TB invalidate for us.
|
||||
*/
|
||||
#ifdef TARGET_HAS_PRECISE_SMC
|
||||
TranslationBlock *current_tb = tcg_tb_lookup(pc);
|
||||
if (current_tb) {
|
||||
current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
host_start = address & qemu_host_page_mask;
|
||||
host_end = host_start + qemu_host_page_size;
|
||||
|
||||
prot = 0;
|
||||
for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
|
||||
p = page_find(addr >> TARGET_PAGE_BITS);
|
||||
p->flags |= PAGE_WRITE;
|
||||
prot |= p->flags;
|
||||
|
||||
/* and since the content will be modified, we must invalidate
|
||||
the corresponding translated code. */
|
||||
current_tb_invalidated |=
|
||||
tb_invalidate_phys_page_unwind(addr, pc);
|
||||
}
|
||||
mprotect((void *)g2h_untagged(host_start), qemu_host_page_size,
|
||||
prot & PAGE_BITS);
|
||||
}
|
||||
mmap_unlock();
|
||||
/* If current TB was invalidated return to main loop */
|
||||
return current_tb_invalidated ? 2 : 1;
|
||||
}
|
||||
mmap_unlock();
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_USER_ONLY */
|
||||
|
||||
/*
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include "exec/exec-all.h"
|
||||
#include "tcg/tcg.h"
|
||||
#include "qemu/bitops.h"
|
||||
#include "qemu/rcu.h"
|
||||
#include "exec/cpu_ldst.h"
|
||||
#include "exec/translate-all.h"
|
||||
#include "exec/helper-proto.h"
|
||||
@ -135,6 +136,593 @@ bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
|
||||
}
|
||||
}
|
||||
|
||||
typedef struct PageFlagsNode {
|
||||
struct rcu_head rcu;
|
||||
IntervalTreeNode itree;
|
||||
int flags;
|
||||
} PageFlagsNode;
|
||||
|
||||
static IntervalTreeRoot pageflags_root;
|
||||
|
||||
static PageFlagsNode *pageflags_find(target_ulong start, target_long last)
|
||||
{
|
||||
IntervalTreeNode *n;
|
||||
|
||||
n = interval_tree_iter_first(&pageflags_root, start, last);
|
||||
return n ? container_of(n, PageFlagsNode, itree) : NULL;
|
||||
}
|
||||
|
||||
static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
|
||||
target_long last)
|
||||
{
|
||||
IntervalTreeNode *n;
|
||||
|
||||
n = interval_tree_iter_next(&p->itree, start, last);
|
||||
return n ? container_of(n, PageFlagsNode, itree) : NULL;
|
||||
}
|
||||
|
||||
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
|
||||
{
|
||||
IntervalTreeNode *n;
|
||||
int rc = 0;
|
||||
|
||||
mmap_lock();
|
||||
for (n = interval_tree_iter_first(&pageflags_root, 0, -1);
|
||||
n != NULL;
|
||||
n = interval_tree_iter_next(n, 0, -1)) {
|
||||
PageFlagsNode *p = container_of(n, PageFlagsNode, itree);
|
||||
|
||||
rc = fn(priv, n->start, n->last + 1, p->flags);
|
||||
if (rc != 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
mmap_unlock();
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int dump_region(void *priv, target_ulong start,
|
||||
target_ulong end, unsigned long prot)
|
||||
{
|
||||
FILE *f = (FILE *)priv;
|
||||
|
||||
fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
|
||||
start, end, end - start,
|
||||
((prot & PAGE_READ) ? 'r' : '-'),
|
||||
((prot & PAGE_WRITE) ? 'w' : '-'),
|
||||
((prot & PAGE_EXEC) ? 'x' : '-'));
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* dump memory mappings */
|
||||
void page_dump(FILE *f)
|
||||
{
|
||||
const int length = sizeof(target_ulong) * 2;
|
||||
|
||||
fprintf(f, "%-*s %-*s %-*s %s\n",
|
||||
length, "start", length, "end", length, "size", "prot");
|
||||
walk_memory_regions(f, dump_region);
|
||||
}
|
||||
|
||||
int page_get_flags(target_ulong address)
|
||||
{
|
||||
PageFlagsNode *p = pageflags_find(address, address);
|
||||
|
||||
/*
|
||||
* See util/interval-tree.c re lockless lookups: no false positives but
|
||||
* there are false negatives. If we find nothing, retry with the mmap
|
||||
* lock acquired.
|
||||
*/
|
||||
if (p) {
|
||||
return p->flags;
|
||||
}
|
||||
if (have_mmap_lock()) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
mmap_lock();
|
||||
p = pageflags_find(address, address);
|
||||
mmap_unlock();
|
||||
return p ? p->flags : 0;
|
||||
}
|
||||
|
||||
/* A subroutine of page_set_flags: insert a new node for [start,last]. */
|
||||
static void pageflags_create(target_ulong start, target_ulong last, int flags)
|
||||
{
|
||||
PageFlagsNode *p = g_new(PageFlagsNode, 1);
|
||||
|
||||
p->itree.start = start;
|
||||
p->itree.last = last;
|
||||
p->flags = flags;
|
||||
interval_tree_insert(&p->itree, &pageflags_root);
|
||||
}
|
||||
|
||||
/* A subroutine of page_set_flags: remove everything in [start,last]. */
|
||||
static bool pageflags_unset(target_ulong start, target_ulong last)
|
||||
{
|
||||
bool inval_tb = false;
|
||||
|
||||
while (true) {
|
||||
PageFlagsNode *p = pageflags_find(start, last);
|
||||
target_ulong p_last;
|
||||
|
||||
if (!p) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (p->flags & PAGE_EXEC) {
|
||||
inval_tb = true;
|
||||
}
|
||||
|
||||
interval_tree_remove(&p->itree, &pageflags_root);
|
||||
p_last = p->itree.last;
|
||||
|
||||
if (p->itree.start < start) {
|
||||
/* Truncate the node from the end, or split out the middle. */
|
||||
p->itree.last = start - 1;
|
||||
interval_tree_insert(&p->itree, &pageflags_root);
|
||||
if (last < p_last) {
|
||||
pageflags_create(last + 1, p_last, p->flags);
|
||||
break;
|
||||
}
|
||||
} else if (p_last <= last) {
|
||||
/* Range completely covers node -- remove it. */
|
||||
g_free_rcu(p, rcu);
|
||||
} else {
|
||||
/* Truncate the node from the start. */
|
||||
p->itree.start = last + 1;
|
||||
interval_tree_insert(&p->itree, &pageflags_root);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return inval_tb;
|
||||
}
|
||||
|
||||
/*
|
||||
* A subroutine of page_set_flags: nothing overlaps [start,last],
|
||||
* but check adjacent mappings and maybe merge into a single range.
|
||||
*/
|
||||
static void pageflags_create_merge(target_ulong start, target_ulong last,
|
||||
int flags)
|
||||
{
|
||||
PageFlagsNode *next = NULL, *prev = NULL;
|
||||
|
||||
if (start > 0) {
|
||||
prev = pageflags_find(start - 1, start - 1);
|
||||
if (prev) {
|
||||
if (prev->flags == flags) {
|
||||
interval_tree_remove(&prev->itree, &pageflags_root);
|
||||
} else {
|
||||
prev = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (last + 1 != 0) {
|
||||
next = pageflags_find(last + 1, last + 1);
|
||||
if (next) {
|
||||
if (next->flags == flags) {
|
||||
interval_tree_remove(&next->itree, &pageflags_root);
|
||||
} else {
|
||||
next = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (prev) {
|
||||
if (next) {
|
||||
prev->itree.last = next->itree.last;
|
||||
g_free_rcu(next, rcu);
|
||||
} else {
|
||||
prev->itree.last = last;
|
||||
}
|
||||
interval_tree_insert(&prev->itree, &pageflags_root);
|
||||
} else if (next) {
|
||||
next->itree.start = start;
|
||||
interval_tree_insert(&next->itree, &pageflags_root);
|
||||
} else {
|
||||
pageflags_create(start, last, flags);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Allow the target to decide if PAGE_TARGET_[12] may be reset.
|
||||
* By default, they are not kept.
|
||||
*/
|
||||
#ifndef PAGE_TARGET_STICKY
|
||||
#define PAGE_TARGET_STICKY 0
|
||||
#endif
|
||||
#define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
|
||||
|
||||
/* A subroutine of page_set_flags: add flags to [start,last]. */
|
||||
static bool pageflags_set_clear(target_ulong start, target_ulong last,
|
||||
int set_flags, int clear_flags)
|
||||
{
|
||||
PageFlagsNode *p;
|
||||
target_ulong p_start, p_last;
|
||||
int p_flags, merge_flags;
|
||||
bool inval_tb = false;
|
||||
|
||||
restart:
|
||||
p = pageflags_find(start, last);
|
||||
if (!p) {
|
||||
if (set_flags) {
|
||||
pageflags_create_merge(start, last, set_flags);
|
||||
}
|
||||
goto done;
|
||||
}
|
||||
|
||||
p_start = p->itree.start;
|
||||
p_last = p->itree.last;
|
||||
p_flags = p->flags;
|
||||
/* Using mprotect on a page does not change sticky bits. */
|
||||
merge_flags = (p_flags & ~clear_flags) | set_flags;
|
||||
|
||||
/*
|
||||
* Need to flush if an overlapping executable region
|
||||
* removes exec, or adds write.
|
||||
*/
|
||||
if ((p_flags & PAGE_EXEC)
|
||||
&& (!(merge_flags & PAGE_EXEC)
|
||||
|| (merge_flags & ~p_flags & PAGE_WRITE))) {
|
||||
inval_tb = true;
|
||||
}
|
||||
|
||||
/*
|
||||
* If there is an exact range match, update and return without
|
||||
* attempting to merge with adjacent regions.
|
||||
*/
|
||||
if (start == p_start && last == p_last) {
|
||||
if (merge_flags) {
|
||||
p->flags = merge_flags;
|
||||
} else {
|
||||
interval_tree_remove(&p->itree, &pageflags_root);
|
||||
g_free_rcu(p, rcu);
|
||||
}
|
||||
goto done;
|
||||
}
|
||||
|
||||
/*
|
||||
* If sticky bits affect the original mapping, then we must be more
|
||||
* careful about the existing intervals and the separate flags.
|
||||
*/
|
||||
if (set_flags != merge_flags) {
|
||||
if (p_start < start) {
|
||||
interval_tree_remove(&p->itree, &pageflags_root);
|
||||
p->itree.last = start - 1;
|
||||
interval_tree_insert(&p->itree, &pageflags_root);
|
||||
|
||||
if (last < p_last) {
|
||||
if (merge_flags) {
|
||||
pageflags_create(start, last, merge_flags);
|
||||
}
|
||||
pageflags_create(last + 1, p_last, p_flags);
|
||||
} else {
|
||||
if (merge_flags) {
|
||||
pageflags_create(start, p_last, merge_flags);
|
||||
}
|
||||
if (p_last < last) {
|
||||
start = p_last + 1;
|
||||
goto restart;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (start < p_start && set_flags) {
|
||||
pageflags_create(start, p_start - 1, set_flags);
|
||||
}
|
||||
if (last < p_last) {
|
||||
interval_tree_remove(&p->itree, &pageflags_root);
|
||||
p->itree.start = last + 1;
|
||||
interval_tree_insert(&p->itree, &pageflags_root);
|
||||
if (merge_flags) {
|
||||
pageflags_create(start, last, merge_flags);
|
||||
}
|
||||
} else {
|
||||
if (merge_flags) {
|
||||
p->flags = merge_flags;
|
||||
} else {
|
||||
interval_tree_remove(&p->itree, &pageflags_root);
|
||||
g_free_rcu(p, rcu);
|
||||
}
|
||||
if (p_last < last) {
|
||||
start = p_last + 1;
|
||||
goto restart;
|
||||
}
|
||||
}
|
||||
}
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* If flags are not changing for this range, incorporate it. */
|
||||
if (set_flags == p_flags) {
|
||||
if (start < p_start) {
|
||||
interval_tree_remove(&p->itree, &pageflags_root);
|
||||
p->itree.start = start;
|
||||
interval_tree_insert(&p->itree, &pageflags_root);
|
||||
}
|
||||
if (p_last < last) {
|
||||
start = p_last + 1;
|
||||
goto restart;
|
||||
}
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* Maybe split out head and/or tail ranges with the original flags. */
|
||||
interval_tree_remove(&p->itree, &pageflags_root);
|
||||
if (p_start < start) {
|
||||
p->itree.last = start - 1;
|
||||
interval_tree_insert(&p->itree, &pageflags_root);
|
||||
|
||||
if (p_last < last) {
|
||||
goto restart;
|
||||
}
|
||||
if (last < p_last) {
|
||||
pageflags_create(last + 1, p_last, p_flags);
|
||||
}
|
||||
} else if (last < p_last) {
|
||||
p->itree.start = last + 1;
|
||||
interval_tree_insert(&p->itree, &pageflags_root);
|
||||
} else {
|
||||
g_free_rcu(p, rcu);
|
||||
goto restart;
|
||||
}
|
||||
if (set_flags) {
|
||||
pageflags_create(start, last, set_flags);
|
||||
}
|
||||
|
||||
done:
|
||||
return inval_tb;
|
||||
}
|
||||
|
||||
/*
|
||||
* Modify the flags of a page and invalidate the code if necessary.
|
||||
* The flag PAGE_WRITE_ORG is positioned automatically depending
|
||||
* on PAGE_WRITE. The mmap_lock should already be held.
|
||||
*/
|
||||
void page_set_flags(target_ulong start, target_ulong end, int flags)
|
||||
{
|
||||
target_ulong last;
|
||||
bool reset = false;
|
||||
bool inval_tb = false;
|
||||
|
||||
/* This function should never be called with addresses outside the
|
||||
guest address space. If this assert fires, it probably indicates
|
||||
a missing call to h2g_valid. */
|
||||
assert(start < end);
|
||||
assert(end - 1 <= GUEST_ADDR_MAX);
|
||||
/* Only set PAGE_ANON with new mappings. */
|
||||
assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
|
||||
assert_memory_lock();
|
||||
|
||||
start = start & TARGET_PAGE_MASK;
|
||||
end = TARGET_PAGE_ALIGN(end);
|
||||
last = end - 1;
|
||||
|
||||
if (!(flags & PAGE_VALID)) {
|
||||
flags = 0;
|
||||
} else {
|
||||
reset = flags & PAGE_RESET;
|
||||
flags &= ~PAGE_RESET;
|
||||
if (flags & PAGE_WRITE) {
|
||||
flags |= PAGE_WRITE_ORG;
|
||||
}
|
||||
}
|
||||
|
||||
if (!flags || reset) {
|
||||
page_reset_target_data(start, end);
|
||||
inval_tb |= pageflags_unset(start, last);
|
||||
}
|
||||
if (flags) {
|
||||
inval_tb |= pageflags_set_clear(start, last, flags,
|
||||
~(reset ? 0 : PAGE_STICKY));
|
||||
}
|
||||
if (inval_tb) {
|
||||
tb_invalidate_phys_range(start, end);
|
||||
}
|
||||
}
|
||||
|
||||
int page_check_range(target_ulong start, target_ulong len, int flags)
|
||||
{
|
||||
target_ulong last;
|
||||
int locked; /* tri-state: =0: unlocked, +1: global, -1: local */
|
||||
int ret;
|
||||
|
||||
if (len == 0) {
|
||||
return 0; /* trivial length */
|
||||
}
|
||||
|
||||
last = start + len - 1;
|
||||
if (last < start) {
|
||||
return -1; /* wrap around */
|
||||
}
|
||||
|
||||
locked = have_mmap_lock();
|
||||
while (true) {
|
||||
PageFlagsNode *p = pageflags_find(start, last);
|
||||
int missing;
|
||||
|
||||
if (!p) {
|
||||
if (!locked) {
|
||||
/*
|
||||
* Lockless lookups have false negatives.
|
||||
* Retry with the lock held.
|
||||
*/
|
||||
mmap_lock();
|
||||
locked = -1;
|
||||
p = pageflags_find(start, last);
|
||||
}
|
||||
if (!p) {
|
||||
ret = -1; /* entire region invalid */
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (start < p->itree.start) {
|
||||
ret = -1; /* initial bytes invalid */
|
||||
break;
|
||||
}
|
||||
|
||||
missing = flags & ~p->flags;
|
||||
if (missing & PAGE_READ) {
|
||||
ret = -1; /* page not readable */
|
||||
break;
|
||||
}
|
||||
if (missing & PAGE_WRITE) {
|
||||
if (!(p->flags & PAGE_WRITE_ORG)) {
|
||||
ret = -1; /* page not writable */
|
||||
break;
|
||||
}
|
||||
/* Asking about writable, but has been protected: undo. */
|
||||
if (!page_unprotect(start, 0)) {
|
||||
ret = -1;
|
||||
break;
|
||||
}
|
||||
/* TODO: page_unprotect should take a range, not a single page. */
|
||||
if (last - start < TARGET_PAGE_SIZE) {
|
||||
ret = 0; /* ok */
|
||||
break;
|
||||
}
|
||||
start += TARGET_PAGE_SIZE;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (last <= p->itree.last) {
|
||||
ret = 0; /* ok */
|
||||
break;
|
||||
}
|
||||
start = p->itree.last + 1;
|
||||
}
|
||||
|
||||
/* Release the lock if acquired locally. */
|
||||
if (locked < 0) {
|
||||
mmap_unlock();
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
void page_protect(tb_page_addr_t address)
|
||||
{
|
||||
PageFlagsNode *p;
|
||||
target_ulong start, last;
|
||||
int prot;
|
||||
|
||||
assert_memory_lock();
|
||||
|
||||
if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
|
||||
start = address & TARGET_PAGE_MASK;
|
||||
last = start + TARGET_PAGE_SIZE - 1;
|
||||
} else {
|
||||
start = address & qemu_host_page_mask;
|
||||
last = start + qemu_host_page_size - 1;
|
||||
}
|
||||
|
||||
p = pageflags_find(start, last);
|
||||
if (!p) {
|
||||
return;
|
||||
}
|
||||
prot = p->flags;
|
||||
|
||||
if (unlikely(p->itree.last < last)) {
|
||||
/* More than one protection region covers the one host page. */
|
||||
assert(TARGET_PAGE_SIZE < qemu_host_page_size);
|
||||
while ((p = pageflags_next(p, start, last)) != NULL) {
|
||||
prot |= p->flags;
|
||||
}
|
||||
}
|
||||
|
||||
if (prot & PAGE_WRITE) {
|
||||
pageflags_set_clear(start, last, 0, PAGE_WRITE);
|
||||
mprotect(g2h_untagged(start), qemu_host_page_size,
|
||||
prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Called from signal handler: invalidate the code and unprotect the
|
||||
* page. Return 0 if the fault was not handled, 1 if it was handled,
|
||||
* and 2 if it was handled but the caller must cause the TB to be
|
||||
* immediately exited. (We can only return 2 if the 'pc' argument is
|
||||
* non-zero.)
|
||||
*/
|
||||
int page_unprotect(target_ulong address, uintptr_t pc)
|
||||
{
|
||||
PageFlagsNode *p;
|
||||
bool current_tb_invalidated;
|
||||
|
||||
/*
|
||||
* Technically this isn't safe inside a signal handler. However we
|
||||
* know this only ever happens in a synchronous SEGV handler, so in
|
||||
* practice it seems to be ok.
|
||||
*/
|
||||
mmap_lock();
|
||||
|
||||
p = pageflags_find(address, address);
|
||||
|
||||
/* If this address was not really writable, nothing to do. */
|
||||
if (!p || !(p->flags & PAGE_WRITE_ORG)) {
|
||||
mmap_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
current_tb_invalidated = false;
|
||||
if (p->flags & PAGE_WRITE) {
|
||||
/*
|
||||
* If the page is actually marked WRITE then assume this is because
|
||||
* this thread raced with another one which got here first and
|
||||
* set the page to PAGE_WRITE and did the TB invalidate for us.
|
||||
*/
|
||||
#ifdef TARGET_HAS_PRECISE_SMC
|
||||
TranslationBlock *current_tb = tcg_tb_lookup(pc);
|
||||
if (current_tb) {
|
||||
current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
target_ulong start, len, i;
|
||||
int prot;
|
||||
|
||||
if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
|
||||
start = address & TARGET_PAGE_MASK;
|
||||
len = TARGET_PAGE_SIZE;
|
||||
prot = p->flags | PAGE_WRITE;
|
||||
pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
|
||||
current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
|
||||
} else {
|
||||
start = address & qemu_host_page_mask;
|
||||
len = qemu_host_page_size;
|
||||
prot = 0;
|
||||
|
||||
for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
|
||||
target_ulong addr = start + i;
|
||||
|
||||
p = pageflags_find(addr, addr);
|
||||
if (p) {
|
||||
prot |= p->flags;
|
||||
if (p->flags & PAGE_WRITE_ORG) {
|
||||
prot |= PAGE_WRITE;
|
||||
pageflags_set_clear(addr, addr + TARGET_PAGE_SIZE - 1,
|
||||
PAGE_WRITE, 0);
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Since the content will be modified, we must invalidate
|
||||
* the corresponding translated code.
|
||||
*/
|
||||
current_tb_invalidated |=
|
||||
tb_invalidate_phys_page_unwind(addr, pc);
|
||||
}
|
||||
}
|
||||
if (prot & PAGE_EXEC) {
|
||||
prot = (prot & ~PAGE_EXEC) | PAGE_READ;
|
||||
}
|
||||
mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
|
||||
}
|
||||
mmap_unlock();
|
||||
|
||||
/* If current TB was invalidated return to main loop */
|
||||
return current_tb_invalidated ? 2 : 1;
|
||||
}
|
||||
|
||||
static int probe_access_internal(CPUArchState *env, target_ulong addr,
|
||||
int fault_size, MMUAccessType access_type,
|
||||
bool nonfault, uintptr_t ra)
|
||||
@ -210,47 +798,96 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
|
||||
return addr;
|
||||
}
|
||||
|
||||
#ifdef TARGET_PAGE_DATA_SIZE
|
||||
/*
|
||||
* Allocate chunks of target data together. For the only current user,
|
||||
* if we allocate one hunk per page, we have overhead of 40/128 or 40%.
|
||||
* Therefore, allocate memory for 64 pages at a time for overhead < 1%.
|
||||
*/
|
||||
#define TPD_PAGES 64
|
||||
#define TBD_MASK (TARGET_PAGE_MASK * TPD_PAGES)
|
||||
|
||||
typedef struct TargetPageDataNode {
|
||||
struct rcu_head rcu;
|
||||
IntervalTreeNode itree;
|
||||
char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
|
||||
} TargetPageDataNode;
|
||||
|
||||
static IntervalTreeRoot targetdata_root;
|
||||
|
||||
void page_reset_target_data(target_ulong start, target_ulong end)
|
||||
{
|
||||
#ifdef TARGET_PAGE_DATA_SIZE
|
||||
target_ulong addr, len;
|
||||
IntervalTreeNode *n, *next;
|
||||
target_ulong last;
|
||||
|
||||
/*
|
||||
* This function should never be called with addresses outside the
|
||||
* guest address space. If this assert fires, it probably indicates
|
||||
* a missing call to h2g_valid.
|
||||
*/
|
||||
assert(end - 1 <= GUEST_ADDR_MAX);
|
||||
assert(start < end);
|
||||
assert_memory_lock();
|
||||
|
||||
start = start & TARGET_PAGE_MASK;
|
||||
end = TARGET_PAGE_ALIGN(end);
|
||||
last = TARGET_PAGE_ALIGN(end) - 1;
|
||||
|
||||
for (addr = start, len = end - start;
|
||||
len != 0;
|
||||
len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
|
||||
PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
|
||||
for (n = interval_tree_iter_first(&targetdata_root, start, last),
|
||||
next = n ? interval_tree_iter_next(n, start, last) : NULL;
|
||||
n != NULL;
|
||||
n = next,
|
||||
next = next ? interval_tree_iter_next(n, start, last) : NULL) {
|
||||
target_ulong n_start, n_last, p_ofs, p_len;
|
||||
TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);
|
||||
|
||||
g_free(p->target_data);
|
||||
p->target_data = NULL;
|
||||
}
|
||||
#endif
|
||||
if (n->start >= start && n->last <= last) {
|
||||
interval_tree_remove(n, &targetdata_root);
|
||||
g_free_rcu(t, rcu);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (n->start < start) {
|
||||
n_start = start;
|
||||
p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
|
||||
} else {
|
||||
n_start = n->start;
|
||||
p_ofs = 0;
|
||||
}
|
||||
n_last = MIN(last, n->last);
|
||||
p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
|
||||
|
||||
memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef TARGET_PAGE_DATA_SIZE
|
||||
void *page_get_target_data(target_ulong address)
|
||||
{
|
||||
PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
|
||||
void *ret = p->target_data;
|
||||
IntervalTreeNode *n;
|
||||
TargetPageDataNode *t;
|
||||
target_ulong page, region;
|
||||
|
||||
if (!ret) {
|
||||
ret = g_malloc0(TARGET_PAGE_DATA_SIZE);
|
||||
p->target_data = ret;
|
||||
page = address & TARGET_PAGE_MASK;
|
||||
region = address & TBD_MASK;
|
||||
|
||||
n = interval_tree_iter_first(&targetdata_root, page, page);
|
||||
if (!n) {
|
||||
/*
|
||||
* See util/interval-tree.c re lockless lookups: no false positives
|
||||
* but there are false negatives. If we find nothing, retry with
|
||||
* the mmap lock acquired. We also need the lock for the
|
||||
* allocation + insert.
|
||||
*/
|
||||
mmap_lock();
|
||||
n = interval_tree_iter_first(&targetdata_root, page, page);
|
||||
if (!n) {
|
||||
t = g_new0(TargetPageDataNode, 1);
|
||||
n = &t->itree;
|
||||
n->start = region;
|
||||
n->last = region | ~TBD_MASK;
|
||||
interval_tree_insert(n, &targetdata_root);
|
||||
}
|
||||
return ret;
|
||||
mmap_unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
t = container_of(n, TargetPageDataNode, itree);
|
||||
return t->data[(page - region) >> TARGET_PAGE_BITS];
|
||||
}
|
||||
#else
|
||||
void page_reset_target_data(target_ulong start, target_ulong end) { }
|
||||
#endif /* TARGET_PAGE_DATA_SIZE */
|
||||
|
||||
/* The softmmu versions of these helpers are in cputlb.c. */
|
||||
|
||||
|
@ -449,7 +449,7 @@ static int alsa_open(bool in, struct alsa_params_req *req,
|
||||
snd_pcm_hw_params_t *hw_params;
|
||||
int err;
|
||||
unsigned int freq, nchannels;
|
||||
const char *pcm_name = apdo->has_dev ? apdo->dev : "default";
|
||||
const char *pcm_name = apdo->dev ?: "default";
|
||||
snd_pcm_uframes_t obt_buffer_size;
|
||||
const char *typ = in ? "ADC" : "DAC";
|
||||
snd_pcm_format_t obtfmt;
|
||||
|
@ -2035,15 +2035,13 @@ void audio_create_pdos(Audiodev *dev)
|
||||
switch (dev->driver) {
|
||||
#define CASE(DRIVER, driver, pdo_name) \
|
||||
case AUDIODEV_DRIVER_##DRIVER: \
|
||||
if (!dev->u.driver.has_in) { \
|
||||
if (!dev->u.driver.in) { \
|
||||
dev->u.driver.in = g_malloc0( \
|
||||
sizeof(Audiodev##pdo_name##PerDirectionOptions)); \
|
||||
dev->u.driver.has_in = true; \
|
||||
} \
|
||||
if (!dev->u.driver.has_out) { \
|
||||
if (!dev->u.driver.out) { \
|
||||
dev->u.driver.out = g_malloc0( \
|
||||
sizeof(Audiodev##pdo_name##PerDirectionOptions)); \
|
||||
dev->u.driver.has_out = true; \
|
||||
} \
|
||||
break
|
||||
|
||||
|
@ -62,15 +62,12 @@ static void get_int(const char *env, uint32_t *dst, bool *has_dst)
|
||||
}
|
||||
}
|
||||
|
||||
static void get_str(const char *env, char **dst, bool *has_dst)
|
||||
static void get_str(const char *env, char **dst)
|
||||
{
|
||||
const char *val = getenv(env);
|
||||
if (val) {
|
||||
if (*has_dst) {
|
||||
g_free(*dst);
|
||||
}
|
||||
*dst = g_strdup(val);
|
||||
*has_dst = true;
|
||||
}
|
||||
}
|
||||
|
||||
@ -169,7 +166,7 @@ static void handle_alsa_per_direction(
|
||||
get_bool(buf, &apdo->try_poll, &apdo->has_try_poll);
|
||||
|
||||
strcpy(buf + len, "DEV");
|
||||
get_str(buf, &apdo->dev, &apdo->has_dev);
|
||||
get_str(buf, &apdo->dev);
|
||||
|
||||
strcpy(buf + len, "SIZE_IN_USEC");
|
||||
get_bool(buf, &size_in_usecs, &dummy);
|
||||
@ -235,7 +232,7 @@ static void handle_oss_per_direction(
|
||||
const char *dev_env)
|
||||
{
|
||||
get_bool(try_poll_env, &opdo->try_poll, &opdo->has_try_poll);
|
||||
get_str(dev_env, &opdo->dev, &opdo->has_dev);
|
||||
get_str(dev_env, &opdo->dev);
|
||||
|
||||
get_bytes_to_usecs("QEMU_OSS_FRAGSIZE",
|
||||
&opdo->buffer_length, &opdo->has_buffer_length,
|
||||
@ -261,7 +258,7 @@ static void handle_oss(Audiodev *dev)
|
||||
static void handle_pa_per_direction(
|
||||
AudiodevPaPerDirectionOptions *ppdo, const char *env)
|
||||
{
|
||||
get_str(env, &ppdo->name, &ppdo->has_name);
|
||||
get_str(env, &ppdo->name);
|
||||
}
|
||||
|
||||
static void handle_pa(Audiodev *dev)
|
||||
@ -278,7 +275,7 @@ static void handle_pa(Audiodev *dev)
|
||||
&dev->u.pa.out->has_buffer_length,
|
||||
qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.out));
|
||||
|
||||
get_str("QEMU_PA_SERVER", &dev->u.pa.server, &dev->u.pa.has_server);
|
||||
get_str("QEMU_PA_SERVER", &dev->u.pa.server);
|
||||
}
|
||||
|
||||
/* SDL */
|
||||
@ -299,7 +296,7 @@ static void handle_wav(Audiodev *dev)
|
||||
&dev->u.wav.out->has_format);
|
||||
get_int("QEMU_WAV_DAC_FIXED_CHANNELS",
|
||||
&dev->u.wav.out->channels, &dev->u.wav.out->has_channels);
|
||||
get_str("QEMU_WAV_PATH", &dev->u.wav.path, &dev->u.wav.has_path);
|
||||
get_str("QEMU_WAV_PATH", &dev->u.wav.path);
|
||||
}
|
||||
|
||||
/* general */
|
||||
|
@ -252,7 +252,7 @@ static int oss_open(int in, struct oss_params *req, audsettings *as,
|
||||
audio_buf_info abinfo;
|
||||
int fmt, freq, nchannels;
|
||||
int setfragment = 1;
|
||||
const char *dspname = opdo->has_dev ? opdo->dev : "/dev/dsp";
|
||||
const char *dspname = opdo->dev ?: "/dev/dsp";
|
||||
const char *typ = in ? "ADC" : "DAC";
|
||||
#ifdef USE_DSP_POLICY
|
||||
int policy = oopts->has_dsp_policy ? oopts->dsp_policy : 5;
|
||||
@ -745,10 +745,8 @@ static void *oss_audio_init(Audiodev *dev)
|
||||
oss_init_per_direction(oopts->in);
|
||||
oss_init_per_direction(oopts->out);
|
||||
|
||||
if (access(oopts->in->has_dev ? oopts->in->dev : "/dev/dsp",
|
||||
R_OK | W_OK) < 0 ||
|
||||
access(oopts->out->has_dev ? oopts->out->dev : "/dev/dsp",
|
||||
R_OK | W_OK) < 0) {
|
||||
if (access(oopts->in->dev ?: "/dev/dsp", R_OK | W_OK) < 0 ||
|
||||
access(oopts->out->dev ?: "/dev/dsp", R_OK | W_OK) < 0) {
|
||||
return NULL;
|
||||
}
|
||||
return dev;
|
||||
|
@ -536,9 +536,9 @@ static int qpa_init_out(HWVoiceOut *hw, struct audsettings *as,
|
||||
|
||||
pa->stream = qpa_simple_new (
|
||||
c,
|
||||
ppdo->has_stream_name ? ppdo->stream_name : g->dev->id,
|
||||
ppdo->stream_name ?: g->dev->id,
|
||||
PA_STREAM_PLAYBACK,
|
||||
ppdo->has_name ? ppdo->name : NULL,
|
||||
ppdo->name,
|
||||
&ss,
|
||||
&ba, /* buffering attributes */
|
||||
&error
|
||||
@ -585,9 +585,9 @@ static int qpa_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
|
||||
|
||||
pa->stream = qpa_simple_new (
|
||||
c,
|
||||
ppdo->has_stream_name ? ppdo->stream_name : g->dev->id,
|
||||
ppdo->stream_name ?: g->dev->id,
|
||||
PA_STREAM_RECORD,
|
||||
ppdo->has_name ? ppdo->name : NULL,
|
||||
ppdo->name,
|
||||
&ss,
|
||||
&ba, /* buffering attributes */
|
||||
&error
|
||||
@ -827,7 +827,7 @@ static void *qpa_audio_init(Audiodev *dev)
|
||||
|
||||
assert(dev->driver == AUDIODEV_DRIVER_PA);
|
||||
|
||||
if (!popts->has_server) {
|
||||
if (!popts->server) {
|
||||
char pidfile[64];
|
||||
char *runtime;
|
||||
struct stat st;
|
||||
@ -850,7 +850,7 @@ static void *qpa_audio_init(Audiodev *dev)
|
||||
}
|
||||
|
||||
g = g_new0(paaudio, 1);
|
||||
server = popts->has_server ? popts->server : NULL;
|
||||
server = popts->server;
|
||||
|
||||
g->dev = dev;
|
||||
|
||||
|
@ -333,7 +333,7 @@ static int sndio_init(SndioVoice *self,
|
||||
unsigned int nch;
|
||||
int i, nfds;
|
||||
|
||||
dev_name = opts->has_dev ? opts->dev : SIO_DEVANY;
|
||||
dev_name = opts->dev ?: SIO_DEVANY;
|
||||
latency = opts->has_latency ? opts->latency : SNDIO_LATENCY_US;
|
||||
|
||||
/* open the device in non-blocking mode */
|
||||
|
@ -78,7 +78,7 @@ static int wav_init_out(HWVoiceOut *hw, struct audsettings *as,
|
||||
Audiodev *dev = drv_opaque;
|
||||
AudiodevWavOptions *wopts = &dev->u.wav;
|
||||
struct audsettings wav_as = audiodev_to_audsettings(dev->u.wav.out);
|
||||
const char *wav_path = wopts->has_path ? wopts->path : "qemu.wav";
|
||||
const char *wav_path = wopts->path ?: "qemu.wav";
|
||||
|
||||
stereo = wav_as.nchannels == 2;
|
||||
switch (wav_as.fmt) {
|
||||
|
@ -23,7 +23,12 @@
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
#include <numaif.h>
|
||||
#include <numa.h>
|
||||
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_DEFAULT != MPOL_DEFAULT);
|
||||
/*
|
||||
* HOST_MEM_POLICY_PREFERRED may either translate to MPOL_PREFERRED or
|
||||
* MPOL_PREFERRED_MANY, see comments further below.
|
||||
*/
|
||||
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_PREFERRED != MPOL_PREFERRED);
|
||||
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_BIND != MPOL_BIND);
|
||||
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_INTERLEAVE != MPOL_INTERLEAVE);
|
||||
@ -346,6 +351,7 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
|
||||
* before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so
|
||||
* this doesn't catch hugepage case. */
|
||||
unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE;
|
||||
int mode = backend->policy;
|
||||
|
||||
/* check for invalid host-nodes and policies and give more verbose
|
||||
* error messages than mbind(). */
|
||||
@ -369,9 +375,18 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
|
||||
BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long));
|
||||
assert(maxnode <= MAX_NODES);
|
||||
|
||||
#ifdef HAVE_NUMA_HAS_PREFERRED_MANY
|
||||
if (mode == MPOL_PREFERRED && numa_has_preferred_many() > 0) {
|
||||
/*
|
||||
* Replace with MPOL_PREFERRED_MANY otherwise the mbind() below
|
||||
* silently picks the first node.
|
||||
*/
|
||||
mode = MPOL_PREFERRED_MANY;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (maxnode &&
|
||||
mbind(ptr, sz, backend->policy, backend->host_nodes, maxnode + 1,
|
||||
flags)) {
|
||||
mbind(ptr, sz, mode, backend->host_nodes, maxnode + 1, flags)) {
|
||||
if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) {
|
||||
error_setg_errno(errp, errno,
|
||||
"cannot bind memory to host NUMA nodes");
|
||||
|
@ -259,12 +259,10 @@ tpm_passthrough_handle_device_opts(TPMPassthruState *tpm_pt, QemuOpts *opts)
|
||||
value = qemu_opt_get(opts, "cancel-path");
|
||||
if (value) {
|
||||
tpm_pt->options->cancel_path = g_strdup(value);
|
||||
tpm_pt->options->has_cancel_path = true;
|
||||
}
|
||||
|
||||
value = qemu_opt_get(opts, "path");
|
||||
if (value) {
|
||||
tpm_pt->options->has_path = true;
|
||||
tpm_pt->options->path = g_strdup(value);
|
||||
}
|
||||
|
||||
|
396
block.c
396
block.c
@ -93,8 +93,6 @@ static bool bdrv_recurse_has_child(BlockDriverState *bs,
|
||||
static void bdrv_replace_child_noperm(BdrvChild *child,
|
||||
BlockDriverState *new_bs);
|
||||
static void bdrv_remove_child(BdrvChild *child, Transaction *tran);
|
||||
static void bdrv_remove_filter_or_cow_child(BlockDriverState *bs,
|
||||
Transaction *tran);
|
||||
|
||||
static int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
|
||||
BlockReopenQueue *queue,
|
||||
@ -528,65 +526,24 @@ typedef struct CreateCo {
|
||||
Error *err;
|
||||
} CreateCo;
|
||||
|
||||
static void coroutine_fn bdrv_create_co_entry(void *opaque)
|
||||
{
|
||||
Error *local_err = NULL;
|
||||
int ret;
|
||||
|
||||
CreateCo *cco = opaque;
|
||||
assert(cco->drv);
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
ret = cco->drv->bdrv_co_create_opts(cco->drv,
|
||||
cco->filename, cco->opts, &local_err);
|
||||
error_propagate(&cco->err, local_err);
|
||||
cco->ret = ret;
|
||||
}
|
||||
|
||||
int bdrv_create(BlockDriver *drv, const char* filename,
|
||||
int coroutine_fn bdrv_co_create(BlockDriver *drv, const char *filename,
|
||||
QemuOpts *opts, Error **errp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
Coroutine *co;
|
||||
CreateCo cco = {
|
||||
.drv = drv,
|
||||
.filename = g_strdup(filename),
|
||||
.opts = opts,
|
||||
.ret = NOT_DONE,
|
||||
.err = NULL,
|
||||
};
|
||||
ERRP_GUARD();
|
||||
|
||||
if (!drv->bdrv_co_create_opts) {
|
||||
error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
|
||||
ret = -ENOTSUP;
|
||||
goto out;
|
||||
error_setg(errp, "Driver '%s' does not support image creation",
|
||||
drv->format_name);
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
if (qemu_in_coroutine()) {
|
||||
/* Fast-path if already in coroutine context */
|
||||
bdrv_create_co_entry(&cco);
|
||||
} else {
|
||||
co = qemu_coroutine_create(bdrv_create_co_entry, &cco);
|
||||
qemu_coroutine_enter(co);
|
||||
while (cco.ret == NOT_DONE) {
|
||||
aio_poll(qemu_get_aio_context(), true);
|
||||
}
|
||||
}
|
||||
|
||||
ret = cco.ret;
|
||||
if (ret < 0) {
|
||||
if (cco.err) {
|
||||
error_propagate(errp, cco.err);
|
||||
} else {
|
||||
ret = drv->bdrv_co_create_opts(drv, filename, opts, errp);
|
||||
if (ret < 0 && !*errp) {
|
||||
error_setg_errno(errp, -ret, "Could not create image");
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
g_free(cco.filename);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -725,7 +682,8 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
|
||||
int coroutine_fn bdrv_co_create_file(const char *filename, QemuOpts *opts,
|
||||
Error **errp)
|
||||
{
|
||||
QemuOpts *protocol_opts;
|
||||
BlockDriver *drv;
|
||||
@ -766,7 +724,7 @@ int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = bdrv_create(drv, filename, protocol_opts, errp);
|
||||
ret = bdrv_co_create(drv, filename, protocol_opts, errp);
|
||||
out:
|
||||
qemu_opts_del(protocol_opts);
|
||||
qobject_unref(qdict);
|
||||
@ -1228,20 +1186,19 @@ static char *bdrv_child_get_parent_desc(BdrvChild *c)
|
||||
static void bdrv_child_cb_drained_begin(BdrvChild *child)
|
||||
{
|
||||
BlockDriverState *bs = child->opaque;
|
||||
bdrv_do_drained_begin_quiesce(bs, NULL, false);
|
||||
bdrv_do_drained_begin_quiesce(bs, NULL);
|
||||
}
|
||||
|
||||
static bool bdrv_child_cb_drained_poll(BdrvChild *child)
|
||||
{
|
||||
BlockDriverState *bs = child->opaque;
|
||||
return bdrv_drain_poll(bs, false, NULL, false);
|
||||
return bdrv_drain_poll(bs, NULL, false);
|
||||
}
|
||||
|
||||
static void bdrv_child_cb_drained_end(BdrvChild *child,
|
||||
int *drained_end_counter)
|
||||
static void bdrv_child_cb_drained_end(BdrvChild *child)
|
||||
{
|
||||
BlockDriverState *bs = child->opaque;
|
||||
bdrv_drained_end_no_poll(bs, drained_end_counter);
|
||||
bdrv_drained_end(bs);
|
||||
}
|
||||
|
||||
static int bdrv_child_cb_inactivate(BdrvChild *child)
|
||||
@ -1445,11 +1402,11 @@ static void bdrv_inherited_options(BdrvChildRole role, bool parent_is_format,
|
||||
*child_flags = flags;
|
||||
}
|
||||
|
||||
static void bdrv_child_cb_attach(BdrvChild *child)
|
||||
static void GRAPH_WRLOCK bdrv_child_cb_attach(BdrvChild *child)
|
||||
{
|
||||
BlockDriverState *bs = child->opaque;
|
||||
|
||||
assert_bdrv_graph_writable(bs);
|
||||
assert_bdrv_graph_writable();
|
||||
QLIST_INSERT_HEAD(&bs->children, child, next);
|
||||
if (bs->drv->is_filter || (child->role & BDRV_CHILD_FILTERED)) {
|
||||
/*
|
||||
@ -1485,11 +1442,9 @@ static void bdrv_child_cb_attach(BdrvChild *child)
|
||||
assert(!bs->file);
|
||||
bs->file = child;
|
||||
}
|
||||
|
||||
bdrv_apply_subtree_drain(child, bs);
|
||||
}
|
||||
|
||||
static void bdrv_child_cb_detach(BdrvChild *child)
|
||||
static void GRAPH_WRLOCK bdrv_child_cb_detach(BdrvChild *child)
|
||||
{
|
||||
BlockDriverState *bs = child->opaque;
|
||||
|
||||
@ -1497,9 +1452,7 @@ static void bdrv_child_cb_detach(BdrvChild *child)
|
||||
bdrv_backing_detach(child);
|
||||
}
|
||||
|
||||
bdrv_unapply_subtree_drain(child, bs);
|
||||
|
||||
assert_bdrv_graph_writable(bs);
|
||||
assert_bdrv_graph_writable();
|
||||
QLIST_REMOVE(child, next);
|
||||
if (child == bs->backing) {
|
||||
assert(child != bs->file);
|
||||
@ -1715,8 +1668,8 @@ static int bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv,
|
||||
assert(is_power_of_2(bs->bl.request_alignment));
|
||||
|
||||
for (i = 0; i < bs->quiesce_counter; i++) {
|
||||
if (drv->bdrv_co_drain_begin) {
|
||||
drv->bdrv_co_drain_begin(bs);
|
||||
if (drv->bdrv_drain_begin) {
|
||||
drv->bdrv_drain_begin(bs);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2414,6 +2367,20 @@ static void bdrv_replace_child_abort(void *opaque)
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
/* old_bs reference is transparently moved from @s to @s->child */
|
||||
if (!s->child->bs) {
|
||||
/*
|
||||
* The parents were undrained when removing old_bs from the child. New
|
||||
* requests can't have been made, though, because the child was empty.
|
||||
*
|
||||
* TODO Make bdrv_replace_child_noperm() transactionable to avoid
|
||||
* undraining the parent in the first place. Once this is done, having
|
||||
* new_bs drained when calling bdrv_replace_child_tran() is not a
|
||||
* requirement any more.
|
||||
*/
|
||||
bdrv_parent_drained_begin_single(s->child);
|
||||
assert(!bdrv_parent_drained_poll_single(s->child));
|
||||
}
|
||||
assert(s->child->quiesced_parent);
|
||||
bdrv_replace_child_noperm(s->child, s->old_bs);
|
||||
bdrv_unref(new_bs);
|
||||
}
|
||||
@ -2429,12 +2396,19 @@ static TransactionActionDrv bdrv_replace_child_drv = {
|
||||
*
|
||||
* Note: real unref of old_bs is done only on commit.
|
||||
*
|
||||
* Both @child->bs and @new_bs (if non-NULL) must be drained. @new_bs must be
|
||||
* kept drained until the transaction is completed.
|
||||
*
|
||||
* The function doesn't update permissions, caller is responsible for this.
|
||||
*/
|
||||
static void bdrv_replace_child_tran(BdrvChild *child, BlockDriverState *new_bs,
|
||||
Transaction *tran)
|
||||
{
|
||||
BdrvReplaceChildState *s = g_new(BdrvReplaceChildState, 1);
|
||||
|
||||
assert(child->quiesced_parent);
|
||||
assert(!new_bs || new_bs->quiesce_counter);
|
||||
|
||||
*s = (BdrvReplaceChildState) {
|
||||
.child = child,
|
||||
.old_bs = child->bs,
|
||||
@ -2523,7 +2497,11 @@ static int bdrv_node_refresh_perm(BlockDriverState *bs, BlockReopenQueue *q,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bdrv_list_refresh_perms(GSList *list, BlockReopenQueue *q,
|
||||
/*
|
||||
* @list is a product of bdrv_topological_dfs() (may be called several times) -
|
||||
* a topologically sorted subgraph.
|
||||
*/
|
||||
static int bdrv_do_refresh_perms(GSList *list, BlockReopenQueue *q,
|
||||
Transaction *tran, Error **errp)
|
||||
{
|
||||
int ret;
|
||||
@ -2546,6 +2524,24 @@ static int bdrv_list_refresh_perms(GSList *list, BlockReopenQueue *q,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* @list is any list of nodes. List is completed by all subtrees and
|
||||
* topologically sorted. It's not a problem if some node occurs in the @list
|
||||
* several times.
|
||||
*/
|
||||
static int bdrv_list_refresh_perms(GSList *list, BlockReopenQueue *q,
|
||||
Transaction *tran, Error **errp)
|
||||
{
|
||||
g_autoptr(GHashTable) found = g_hash_table_new(NULL, NULL);
|
||||
g_autoptr(GSList) refresh_list = NULL;
|
||||
|
||||
for ( ; list; list = list->next) {
|
||||
refresh_list = bdrv_topological_dfs(refresh_list, found, list->data);
|
||||
}
|
||||
|
||||
return bdrv_do_refresh_perms(refresh_list, q, tran, errp);
|
||||
}
|
||||
|
||||
void bdrv_get_cumulative_perm(BlockDriverState *bs, uint64_t *perm,
|
||||
uint64_t *shared_perm)
|
||||
{
|
||||
@ -2593,15 +2589,24 @@ char *bdrv_perm_names(uint64_t perm)
|
||||
}
|
||||
|
||||
|
||||
static int bdrv_refresh_perms(BlockDriverState *bs, Error **errp)
|
||||
/* @tran is allowed to be NULL. In this case no rollback is possible */
|
||||
static int bdrv_refresh_perms(BlockDriverState *bs, Transaction *tran,
|
||||
Error **errp)
|
||||
{
|
||||
int ret;
|
||||
Transaction *tran = tran_new();
|
||||
Transaction *local_tran = NULL;
|
||||
g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, bs);
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
ret = bdrv_list_refresh_perms(list, NULL, tran, errp);
|
||||
tran_finalize(tran, ret);
|
||||
if (!tran) {
|
||||
tran = local_tran = tran_new();
|
||||
}
|
||||
|
||||
ret = bdrv_do_refresh_perms(list, NULL, tran, errp);
|
||||
|
||||
if (local_tran) {
|
||||
tran_finalize(local_tran, ret);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -2617,7 +2622,7 @@ int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared,
|
||||
|
||||
bdrv_child_set_perm(c, perm, shared, tran);
|
||||
|
||||
ret = bdrv_refresh_perms(c->bs, &local_err);
|
||||
ret = bdrv_refresh_perms(c->bs, tran, &local_err);
|
||||
|
||||
tran_finalize(tran, ret);
|
||||
|
||||
@ -2826,14 +2831,41 @@ uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm)
|
||||
return permissions[qapi_perm];
|
||||
}
|
||||
|
||||
/*
|
||||
* Replaces the node that a BdrvChild points to without updating permissions.
|
||||
*
|
||||
* If @new_bs is non-NULL, the parent of @child must already be drained through
|
||||
* @child.
|
||||
*/
|
||||
static void bdrv_replace_child_noperm(BdrvChild *child,
|
||||
BlockDriverState *new_bs)
|
||||
{
|
||||
BlockDriverState *old_bs = child->bs;
|
||||
int new_bs_quiesce_counter;
|
||||
int drain_saldo;
|
||||
|
||||
assert(!child->frozen);
|
||||
|
||||
/*
|
||||
* If we want to change the BdrvChild to point to a drained node as its new
|
||||
* child->bs, we need to make sure that its new parent is drained, too. In
|
||||
* other words, either child->quiesce_parent must already be true or we must
|
||||
* be able to set it and keep the parent's quiesce_counter consistent with
|
||||
* that, but without polling or starting new requests (this function
|
||||
* guarantees that it doesn't poll, and starting new requests would be
|
||||
* against the invariants of drain sections).
|
||||
*
|
||||
* To keep things simple, we pick the first option (child->quiesce_parent
|
||||
* must already be true). We also generalise the rule a bit to make it
|
||||
* easier to verify in callers and more likely to be covered in test cases:
|
||||
* The parent must be quiesced through this child even if new_bs isn't
|
||||
* currently drained.
|
||||
*
|
||||
* The only exception is for callers that always pass new_bs == NULL. In
|
||||
* this case, we obviously never need to consider the case of a drained
|
||||
* new_bs, so we can keep the callers simpler by allowing them not to drain
|
||||
* the parent.
|
||||
*/
|
||||
assert(!new_bs || child->quiesced_parent);
|
||||
assert(old_bs != new_bs);
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
@ -2841,59 +2873,33 @@ static void bdrv_replace_child_noperm(BdrvChild *child,
|
||||
assert(bdrv_get_aio_context(old_bs) == bdrv_get_aio_context(new_bs));
|
||||
}
|
||||
|
||||
new_bs_quiesce_counter = (new_bs ? new_bs->quiesce_counter : 0);
|
||||
drain_saldo = new_bs_quiesce_counter - child->parent_quiesce_counter;
|
||||
|
||||
/*
|
||||
* If the new child node is drained but the old one was not, flush
|
||||
* all outstanding requests to the old child node.
|
||||
*/
|
||||
while (drain_saldo > 0 && child->klass->drained_begin) {
|
||||
bdrv_parent_drained_begin_single(child, true);
|
||||
drain_saldo--;
|
||||
}
|
||||
|
||||
/* TODO Pull this up into the callers to avoid polling here */
|
||||
bdrv_graph_wrlock();
|
||||
if (old_bs) {
|
||||
/* Detach first so that the recursive drain sections coming from @child
|
||||
* are already gone and we only end the drain sections that came from
|
||||
* elsewhere. */
|
||||
if (child->klass->detach) {
|
||||
child->klass->detach(child);
|
||||
}
|
||||
assert_bdrv_graph_writable(old_bs);
|
||||
QLIST_REMOVE(child, next_parent);
|
||||
}
|
||||
|
||||
child->bs = new_bs;
|
||||
|
||||
if (new_bs) {
|
||||
assert_bdrv_graph_writable(new_bs);
|
||||
QLIST_INSERT_HEAD(&new_bs->parents, child, next_parent);
|
||||
|
||||
/*
|
||||
* Detaching the old node may have led to the new node's
|
||||
* quiesce_counter having been decreased. Not a problem, we
|
||||
* just need to recognize this here and then invoke
|
||||
* drained_end appropriately more often.
|
||||
*/
|
||||
assert(new_bs->quiesce_counter <= new_bs_quiesce_counter);
|
||||
drain_saldo += new_bs->quiesce_counter - new_bs_quiesce_counter;
|
||||
|
||||
/* Attach only after starting new drained sections, so that recursive
|
||||
* drain sections coming from @child don't get an extra .drained_begin
|
||||
* callback. */
|
||||
if (child->klass->attach) {
|
||||
child->klass->attach(child);
|
||||
}
|
||||
}
|
||||
bdrv_graph_wrunlock();
|
||||
|
||||
/*
|
||||
* If the old child node was drained but the new one is not, allow
|
||||
* requests to come in only after the new node has been attached.
|
||||
* If the parent was drained through this BdrvChild previously, but new_bs
|
||||
* is not drained, allow requests to come in only after the new node has
|
||||
* been attached.
|
||||
*/
|
||||
while (drain_saldo < 0 && child->klass->drained_end) {
|
||||
new_bs_quiesce_counter = (new_bs ? new_bs->quiesce_counter : 0);
|
||||
if (!new_bs_quiesce_counter && child->quiesced_parent) {
|
||||
bdrv_parent_drained_end_single(child);
|
||||
drain_saldo++;
|
||||
}
|
||||
}
|
||||
|
||||
@ -3026,6 +3032,24 @@ static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs,
|
||||
}
|
||||
|
||||
bdrv_ref(child_bs);
|
||||
/*
|
||||
* Let every new BdrvChild start with a drained parent. Inserting the child
|
||||
* in the graph with bdrv_replace_child_noperm() will undrain it if
|
||||
* @child_bs is not drained.
|
||||
*
|
||||
* The child was only just created and is not yet visible in global state
|
||||
* until bdrv_replace_child_noperm() inserts it into the graph, so nobody
|
||||
* could have sent requests and polling is not necessary.
|
||||
*
|
||||
* Note that this means that the parent isn't fully drained yet, we only
|
||||
* stop new requests from coming in. This is fine, we don't care about the
|
||||
* old requests here, they are not for this child. If another place enters a
|
||||
* drain section for the same parent, but wants it to be fully quiesced, it
|
||||
* will not run most of the the code in .drained_begin() again (which is not
|
||||
* a problem, we already did this), but it will still poll until the parent
|
||||
* is fully quiesced, so it will not be negatively affected either.
|
||||
*/
|
||||
bdrv_parent_drained_begin_single(new_child);
|
||||
bdrv_replace_child_noperm(new_child, child_bs);
|
||||
|
||||
BdrvAttachChildCommonState *s = g_new(BdrvAttachChildCommonState, 1);
|
||||
@ -3070,30 +3094,6 @@ static BdrvChild *bdrv_attach_child_noperm(BlockDriverState *parent_bs,
|
||||
tran, errp);
|
||||
}
|
||||
|
||||
static void bdrv_detach_child(BdrvChild *child)
|
||||
{
|
||||
BlockDriverState *old_bs = child->bs;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
bdrv_replace_child_noperm(child, NULL);
|
||||
bdrv_child_free(child);
|
||||
|
||||
if (old_bs) {
|
||||
/*
|
||||
* Update permissions for old node. We're just taking a parent away, so
|
||||
* we're loosening restrictions. Errors of permission update are not
|
||||
* fatal in this case, ignore them.
|
||||
*/
|
||||
bdrv_refresh_perms(old_bs, NULL);
|
||||
|
||||
/*
|
||||
* When the parent requiring a non-default AioContext is removed, the
|
||||
* node moves back to the main AioContext
|
||||
*/
|
||||
bdrv_try_change_aio_context(old_bs, qemu_get_aio_context(), NULL, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This function steals the reference to child_bs from the caller.
|
||||
* That reference is later dropped by bdrv_root_unref_child().
|
||||
@ -3125,7 +3125,7 @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = bdrv_refresh_perms(child_bs, errp);
|
||||
ret = bdrv_refresh_perms(child_bs, tran, errp);
|
||||
|
||||
out:
|
||||
tran_finalize(tran, ret);
|
||||
@ -3166,7 +3166,7 @@ BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = bdrv_refresh_perms(parent_bs, errp);
|
||||
ret = bdrv_refresh_perms(parent_bs, tran, errp);
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
}
|
||||
@ -3182,12 +3182,28 @@ out:
|
||||
/* Callers must ensure that child->frozen is false. */
|
||||
void bdrv_root_unref_child(BdrvChild *child)
|
||||
{
|
||||
BlockDriverState *child_bs;
|
||||
BlockDriverState *child_bs = child->bs;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
bdrv_replace_child_noperm(child, NULL);
|
||||
bdrv_child_free(child);
|
||||
|
||||
if (child_bs) {
|
||||
/*
|
||||
* Update permissions for old node. We're just taking a parent away, so
|
||||
* we're loosening restrictions. Errors of permission update are not
|
||||
* fatal in this case, ignore them.
|
||||
*/
|
||||
bdrv_refresh_perms(child_bs, NULL, NULL);
|
||||
|
||||
/*
|
||||
* When the parent requiring a non-default AioContext is removed, the
|
||||
* node moves back to the main AioContext
|
||||
*/
|
||||
bdrv_try_change_aio_context(child_bs, qemu_get_aio_context(), NULL,
|
||||
NULL);
|
||||
}
|
||||
|
||||
child_bs = child->bs;
|
||||
bdrv_detach_child(child);
|
||||
bdrv_unref(child_bs);
|
||||
}
|
||||
|
||||
@ -3406,24 +3422,35 @@ static int bdrv_set_backing_noperm(BlockDriverState *bs,
|
||||
return bdrv_set_file_or_backing_noperm(bs, backing_hd, true, tran, errp);
|
||||
}
|
||||
|
||||
int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
|
||||
int bdrv_set_backing_hd_drained(BlockDriverState *bs,
|
||||
BlockDriverState *backing_hd,
|
||||
Error **errp)
|
||||
{
|
||||
int ret;
|
||||
Transaction *tran = tran_new();
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
bdrv_drained_begin(bs);
|
||||
assert(bs->quiesce_counter > 0);
|
||||
|
||||
ret = bdrv_set_backing_noperm(bs, backing_hd, tran, errp);
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = bdrv_refresh_perms(bs, errp);
|
||||
ret = bdrv_refresh_perms(bs, tran, errp);
|
||||
out:
|
||||
tran_finalize(tran, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
|
||||
Error **errp)
|
||||
{
|
||||
int ret;
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
bdrv_drained_begin(bs);
|
||||
ret = bdrv_set_backing_hd_drained(bs, backing_hd, errp);
|
||||
bdrv_drained_end(bs);
|
||||
|
||||
return ret;
|
||||
@ -4153,7 +4180,9 @@ static bool bdrv_recurse_has_child(BlockDriverState *bs,
|
||||
* returns a pointer to bs_queue, which is either the newly allocated
|
||||
* bs_queue, or the existing bs_queue being used.
|
||||
*
|
||||
* bs must be drained between bdrv_reopen_queue() and bdrv_reopen_multiple().
|
||||
* bs is drained here and undrained by bdrv_reopen_queue_free().
|
||||
*
|
||||
* To be called with bs->aio_context locked.
|
||||
*/
|
||||
static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue,
|
||||
BlockDriverState *bs,
|
||||
@ -4173,12 +4202,10 @@ static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue,
|
||||
int flags;
|
||||
QemuOpts *opts;
|
||||
|
||||
/* Make sure that the caller remembered to use a drained section. This is
|
||||
* important to avoid graph changes between the recursive queuing here and
|
||||
* bdrv_reopen_multiple(). */
|
||||
assert(bs->quiesce_counter > 0);
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
bdrv_drained_begin(bs);
|
||||
|
||||
if (bs_queue == NULL) {
|
||||
bs_queue = g_new0(BlockReopenQueue, 1);
|
||||
QTAILQ_INIT(bs_queue);
|
||||
@ -4312,6 +4339,7 @@ static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue,
|
||||
return bs_queue;
|
||||
}
|
||||
|
||||
/* To be called with bs->aio_context locked */
|
||||
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
|
||||
BlockDriverState *bs,
|
||||
QDict *options, bool keep_old_opts)
|
||||
@ -4328,6 +4356,12 @@ void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue)
|
||||
if (bs_queue) {
|
||||
BlockReopenQueueEntry *bs_entry, *next;
|
||||
QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
|
||||
AioContext *ctx = bdrv_get_aio_context(bs_entry->state.bs);
|
||||
|
||||
aio_context_acquire(ctx);
|
||||
bdrv_drained_end(bs_entry->state.bs);
|
||||
aio_context_release(ctx);
|
||||
|
||||
qobject_unref(bs_entry->state.explicit_options);
|
||||
qobject_unref(bs_entry->state.options);
|
||||
g_free(bs_entry);
|
||||
@ -4361,7 +4395,6 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
|
||||
BlockReopenQueueEntry *bs_entry, *next;
|
||||
AioContext *ctx;
|
||||
Transaction *tran = tran_new();
|
||||
g_autoptr(GHashTable) found = NULL;
|
||||
g_autoptr(GSList) refresh_list = NULL;
|
||||
|
||||
assert(qemu_get_current_aio_context() == qemu_get_aio_context());
|
||||
@ -4391,18 +4424,15 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
|
||||
bs_entry->prepared = true;
|
||||
}
|
||||
|
||||
found = g_hash_table_new(NULL, NULL);
|
||||
QTAILQ_FOREACH(bs_entry, bs_queue, entry) {
|
||||
BDRVReopenState *state = &bs_entry->state;
|
||||
|
||||
refresh_list = bdrv_topological_dfs(refresh_list, found, state->bs);
|
||||
refresh_list = g_slist_prepend(refresh_list, state->bs);
|
||||
if (state->old_backing_bs) {
|
||||
refresh_list = bdrv_topological_dfs(refresh_list, found,
|
||||
state->old_backing_bs);
|
||||
refresh_list = g_slist_prepend(refresh_list, state->old_backing_bs);
|
||||
}
|
||||
if (state->old_file_bs) {
|
||||
refresh_list = bdrv_topological_dfs(refresh_list, found,
|
||||
state->old_file_bs);
|
||||
refresh_list = g_slist_prepend(refresh_list, state->old_file_bs);
|
||||
}
|
||||
}
|
||||
|
||||
@ -4475,18 +4505,16 @@ int bdrv_reopen(BlockDriverState *bs, QDict *opts, bool keep_old_opts,
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
bdrv_subtree_drained_begin(bs);
|
||||
queue = bdrv_reopen_queue(NULL, bs, opts, keep_old_opts);
|
||||
|
||||
if (ctx != qemu_get_aio_context()) {
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
|
||||
queue = bdrv_reopen_queue(NULL, bs, opts, keep_old_opts);
|
||||
ret = bdrv_reopen_multiple(queue, errp);
|
||||
|
||||
if (ctx != qemu_get_aio_context()) {
|
||||
aio_context_acquire(ctx);
|
||||
}
|
||||
bdrv_subtree_drained_end(bs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -5067,23 +5095,24 @@ static void bdrv_remove_child(BdrvChild *child, Transaction *tran)
|
||||
}
|
||||
|
||||
if (child->bs) {
|
||||
BlockDriverState *bs = child->bs;
|
||||
bdrv_drained_begin(bs);
|
||||
bdrv_replace_child_tran(child, NULL, tran);
|
||||
bdrv_drained_end(bs);
|
||||
}
|
||||
|
||||
tran_add(tran, &bdrv_remove_child_drv, child);
|
||||
}
|
||||
|
||||
/*
|
||||
* A function to remove backing-chain child of @bs if exists: cow child for
|
||||
* format nodes (always .backing) and filter child for filters (may be .file or
|
||||
* .backing)
|
||||
*/
|
||||
static void bdrv_remove_filter_or_cow_child(BlockDriverState *bs,
|
||||
Transaction *tran)
|
||||
static void undrain_on_clean_cb(void *opaque)
|
||||
{
|
||||
bdrv_remove_child(bdrv_filter_or_cow_child(bs), tran);
|
||||
bdrv_drained_end(opaque);
|
||||
}
|
||||
|
||||
static TransactionActionDrv undrain_on_clean = {
|
||||
.clean = undrain_on_clean_cb,
|
||||
};
|
||||
|
||||
static int bdrv_replace_node_noperm(BlockDriverState *from,
|
||||
BlockDriverState *to,
|
||||
bool auto_skip, Transaction *tran,
|
||||
@ -5093,6 +5122,11 @@ static int bdrv_replace_node_noperm(BlockDriverState *from,
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
bdrv_drained_begin(from);
|
||||
bdrv_drained_begin(to);
|
||||
tran_add(tran, &undrain_on_clean, from);
|
||||
tran_add(tran, &undrain_on_clean, to);
|
||||
|
||||
QLIST_FOREACH_SAFE(c, &from->parents, next_parent, next) {
|
||||
assert(c->bs == from);
|
||||
if (!should_update_child(c, to)) {
|
||||
@ -5130,7 +5164,6 @@ static int bdrv_replace_node_common(BlockDriverState *from,
|
||||
Error **errp)
|
||||
{
|
||||
Transaction *tran = tran_new();
|
||||
g_autoptr(GHashTable) found = NULL;
|
||||
g_autoptr(GSList) refresh_list = NULL;
|
||||
BlockDriverState *to_cow_parent = NULL;
|
||||
int ret;
|
||||
@ -5168,13 +5201,11 @@ static int bdrv_replace_node_common(BlockDriverState *from,
|
||||
}
|
||||
|
||||
if (detach_subchain) {
|
||||
bdrv_remove_filter_or_cow_child(to_cow_parent, tran);
|
||||
bdrv_remove_child(bdrv_filter_or_cow_child(to_cow_parent), tran);
|
||||
}
|
||||
|
||||
found = g_hash_table_new(NULL, NULL);
|
||||
|
||||
refresh_list = bdrv_topological_dfs(refresh_list, found, to);
|
||||
refresh_list = bdrv_topological_dfs(refresh_list, found, from);
|
||||
refresh_list = g_slist_prepend(refresh_list, to);
|
||||
refresh_list = g_slist_prepend(refresh_list, from);
|
||||
|
||||
ret = bdrv_list_refresh_perms(refresh_list, NULL, tran, errp);
|
||||
if (ret < 0) {
|
||||
@ -5244,7 +5275,7 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = bdrv_refresh_perms(bs_new, errp);
|
||||
ret = bdrv_refresh_perms(bs_new, tran, errp);
|
||||
out:
|
||||
tran_finalize(tran, ret);
|
||||
|
||||
@ -5259,7 +5290,6 @@ int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs,
|
||||
{
|
||||
int ret;
|
||||
Transaction *tran = tran_new();
|
||||
g_autoptr(GHashTable) found = NULL;
|
||||
g_autoptr(GSList) refresh_list = NULL;
|
||||
BlockDriverState *old_bs = child->bs;
|
||||
|
||||
@ -5271,9 +5301,8 @@ int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs,
|
||||
|
||||
bdrv_replace_child_tran(child, new_bs, tran);
|
||||
|
||||
found = g_hash_table_new(NULL, NULL);
|
||||
refresh_list = bdrv_topological_dfs(refresh_list, found, old_bs);
|
||||
refresh_list = bdrv_topological_dfs(refresh_list, found, new_bs);
|
||||
refresh_list = g_slist_prepend(refresh_list, old_bs);
|
||||
refresh_list = g_slist_prepend(refresh_list, new_bs);
|
||||
|
||||
ret = bdrv_list_refresh_perms(refresh_list, NULL, tran, errp);
|
||||
|
||||
@ -5373,6 +5402,7 @@ int coroutine_fn bdrv_co_check(BlockDriverState *bs,
|
||||
BdrvCheckResult *res, BdrvCheckMode fix)
|
||||
{
|
||||
IO_CODE();
|
||||
assert_bdrv_graph_readable();
|
||||
if (bs->drv == NULL) {
|
||||
return -ENOMEDIUM;
|
||||
}
|
||||
@ -5595,7 +5625,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
bdrv_ref(top);
|
||||
bdrv_subtree_drained_begin(top);
|
||||
bdrv_drained_begin(base);
|
||||
|
||||
if (!top->drv || !base->drv) {
|
||||
goto exit;
|
||||
@ -5668,7 +5698,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
|
||||
|
||||
ret = 0;
|
||||
exit:
|
||||
bdrv_subtree_drained_end(top);
|
||||
bdrv_drained_end(base);
|
||||
bdrv_unref(top);
|
||||
return ret;
|
||||
}
|
||||
@ -6544,7 +6574,7 @@ int bdrv_activate(BlockDriverState *bs, Error **errp)
|
||||
*/
|
||||
if (bs->open_flags & BDRV_O_INACTIVE) {
|
||||
bs->open_flags &= ~BDRV_O_INACTIVE;
|
||||
ret = bdrv_refresh_perms(bs, errp);
|
||||
ret = bdrv_refresh_perms(bs, NULL, errp);
|
||||
if (ret < 0) {
|
||||
bs->open_flags |= BDRV_O_INACTIVE;
|
||||
return ret;
|
||||
@ -6588,6 +6618,7 @@ int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp)
|
||||
IO_CODE();
|
||||
|
||||
assert(!(bs->open_flags & BDRV_O_INACTIVE));
|
||||
assert_bdrv_graph_readable();
|
||||
|
||||
if (bs->drv->bdrv_co_invalidate_cache) {
|
||||
bs->drv->bdrv_co_invalidate_cache(bs, &local_err);
|
||||
@ -6689,7 +6720,7 @@ static int bdrv_inactivate_recurse(BlockDriverState *bs)
|
||||
* We only tried to loosen restrictions, so errors are not fatal, ignore
|
||||
* them.
|
||||
*/
|
||||
bdrv_refresh_perms(bs, NULL);
|
||||
bdrv_refresh_perms(bs, NULL, NULL);
|
||||
|
||||
/* Recursively inactivate children */
|
||||
QLIST_FOREACH(child, &bs->children, next) {
|
||||
@ -6894,6 +6925,10 @@ bool bdrv_op_blocker_is_empty(BlockDriverState *bs)
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Must not be called while holding the lock of an AioContext other than the
|
||||
* current one.
|
||||
*/
|
||||
void bdrv_img_create(const char *filename, const char *fmt,
|
||||
const char *base_filename, const char *base_fmt,
|
||||
char *options, uint64_t img_size, int flags, bool quiet,
|
||||
@ -7181,7 +7216,6 @@ static void bdrv_detach_aio_context(BlockDriverState *bs)
|
||||
if (bs->quiesce_counter) {
|
||||
aio_enable_external(bs->aio_context);
|
||||
}
|
||||
assert_bdrv_graph_writable(bs);
|
||||
bs->aio_context = NULL;
|
||||
}
|
||||
|
||||
@ -7195,7 +7229,6 @@ static void bdrv_attach_aio_context(BlockDriverState *bs,
|
||||
aio_disable_external(new_context);
|
||||
}
|
||||
|
||||
assert_bdrv_graph_writable(bs);
|
||||
bs->aio_context = new_context;
|
||||
|
||||
if (bs->drv && bs->drv->bdrv_attach_aio_context) {
|
||||
@ -7276,7 +7309,6 @@ static void bdrv_set_aio_context_commit(void *opaque)
|
||||
BlockDriverState *bs = (BlockDriverState *) state->bs;
|
||||
AioContext *new_context = state->new_ctx;
|
||||
AioContext *old_context = bdrv_get_aio_context(bs);
|
||||
assert_bdrv_graph_writable(bs);
|
||||
|
||||
/*
|
||||
* Take the old AioContex when detaching it from bs.
|
||||
|
@ -297,9 +297,7 @@ static int read_config(BDRVBlkdebugState *s, const char *filename,
|
||||
}
|
||||
}
|
||||
|
||||
qemu_config_parse_qdict(options, config_groups, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
if (!qemu_config_parse_qdict(options, config_groups, errp)) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
@ -129,7 +129,7 @@ static void blk_root_inherit_options(BdrvChildRole role, bool parent_is_format,
|
||||
}
|
||||
static void blk_root_drained_begin(BdrvChild *child);
|
||||
static bool blk_root_drained_poll(BdrvChild *child);
|
||||
static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter);
|
||||
static void blk_root_drained_end(BdrvChild *child);
|
||||
|
||||
static void blk_root_change_media(BdrvChild *child, bool load);
|
||||
static void blk_root_resize(BdrvChild *child);
|
||||
@ -1424,6 +1424,27 @@ int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
|
||||
return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags);
|
||||
}
|
||||
|
||||
int coroutine_fn blk_co_block_status_above(BlockBackend *blk,
|
||||
BlockDriverState *base,
|
||||
int64_t offset, int64_t bytes,
|
||||
int64_t *pnum, int64_t *map,
|
||||
BlockDriverState **file)
|
||||
{
|
||||
IO_CODE();
|
||||
return bdrv_co_block_status_above(blk_bs(blk), base, offset, bytes, pnum,
|
||||
map, file);
|
||||
}
|
||||
|
||||
int coroutine_fn blk_co_is_allocated_above(BlockBackend *blk,
|
||||
BlockDriverState *base,
|
||||
bool include_base, int64_t offset,
|
||||
int64_t bytes, int64_t *pnum)
|
||||
{
|
||||
IO_CODE();
|
||||
return bdrv_co_is_allocated_above(blk_bs(blk), base, include_base, offset,
|
||||
bytes, pnum);
|
||||
}
|
||||
|
||||
typedef struct BlkRwCo {
|
||||
BlockBackend *blk;
|
||||
int64_t offset;
|
||||
@ -1860,7 +1881,7 @@ static void send_qmp_error_event(BlockBackend *blk,
|
||||
BlockDriverState *bs = blk_bs(blk);
|
||||
|
||||
optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
|
||||
qapi_event_send_block_io_error(blk_name(blk), !!bs,
|
||||
qapi_event_send_block_io_error(blk_name(blk),
|
||||
bs ? bdrv_get_node_name(bs) : NULL, optype,
|
||||
action, blk_iostatus_is_enabled(blk),
|
||||
error == ENOSPC, strerror(error));
|
||||
@ -2556,7 +2577,7 @@ static bool blk_root_drained_poll(BdrvChild *child)
|
||||
return busy || !!blk->in_flight;
|
||||
}
|
||||
|
||||
static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)
|
||||
static void blk_root_drained_end(BdrvChild *child)
|
||||
{
|
||||
BlockBackend *blk = child->opaque;
|
||||
assert(blk->quiesce_counter);
|
||||
|
@ -577,7 +577,8 @@ static coroutine_fn int block_copy_task_entry(AioTask *task)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int block_copy_block_status(BlockCopyState *s, int64_t offset,
|
||||
static coroutine_fn int block_copy_block_status(BlockCopyState *s,
|
||||
int64_t offset,
|
||||
int64_t bytes, int64_t *pnum)
|
||||
{
|
||||
int64_t num;
|
||||
@ -590,7 +591,7 @@ static int block_copy_block_status(BlockCopyState *s, int64_t offset,
|
||||
base = NULL;
|
||||
}
|
||||
|
||||
ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
|
||||
ret = bdrv_co_block_status_above(s->source->bs, base, offset, bytes, &num,
|
||||
NULL, NULL);
|
||||
if (ret < 0 || num < s->cluster_size) {
|
||||
/*
|
||||
@ -613,7 +614,8 @@ static int block_copy_block_status(BlockCopyState *s, int64_t offset,
|
||||
* Check if the cluster starting at offset is allocated or not.
|
||||
* return via pnum the number of contiguous clusters sharing this allocation.
|
||||
*/
|
||||
static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
|
||||
static int coroutine_fn block_copy_is_cluster_allocated(BlockCopyState *s,
|
||||
int64_t offset,
|
||||
int64_t *pnum)
|
||||
{
|
||||
BlockDriverState *bs = s->source->bs;
|
||||
@ -624,7 +626,7 @@ static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
|
||||
assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
|
||||
|
||||
while (true) {
|
||||
ret = bdrv_is_allocated(bs, offset, bytes, &count);
|
||||
ret = bdrv_co_is_allocated(bs, offset, bytes, &count);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
@ -669,8 +671,9 @@ void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes)
|
||||
* @return 0 when the cluster at @offset was unallocated,
|
||||
* 1 otherwise, and -ret on error.
|
||||
*/
|
||||
int64_t block_copy_reset_unallocated(BlockCopyState *s,
|
||||
int64_t offset, int64_t *count)
|
||||
int64_t coroutine_fn block_copy_reset_unallocated(BlockCopyState *s,
|
||||
int64_t offset,
|
||||
int64_t *count)
|
||||
{
|
||||
int ret;
|
||||
int64_t clusters, bytes;
|
||||
|
@ -30,20 +30,17 @@
|
||||
|
||||
/* Base structure for argument packing structures */
|
||||
typedef struct BdrvPollCo {
|
||||
BlockDriverState *bs;
|
||||
AioContext *ctx;
|
||||
bool in_progress;
|
||||
int ret;
|
||||
Coroutine *co; /* Keep pointer here for debugging */
|
||||
} BdrvPollCo;
|
||||
|
||||
static inline int bdrv_poll_co(BdrvPollCo *s)
|
||||
static inline void bdrv_poll_co(BdrvPollCo *s)
|
||||
{
|
||||
assert(!qemu_in_coroutine());
|
||||
|
||||
bdrv_coroutine_enter(s->bs, s->co);
|
||||
BDRV_POLL_WHILE(s->bs, s->in_progress);
|
||||
|
||||
return s->ret;
|
||||
aio_co_enter(s->ctx, s->co);
|
||||
AIO_WAIT_WHILE(s->ctx, s->in_progress);
|
||||
}
|
||||
|
||||
#endif /* BLOCK_BLOCK_GEN_H */
|
||||
|
@ -155,7 +155,7 @@ static int coroutine_fn commit_run(Job *job, Error **errp)
|
||||
break;
|
||||
}
|
||||
/* Copy if allocated above the base */
|
||||
ret = bdrv_is_allocated_above(blk_bs(s->top), s->base_overlay, true,
|
||||
ret = blk_co_is_allocated_above(s->top, s->base_overlay, true,
|
||||
offset, COMMIT_BUFFER_SIZE, &n);
|
||||
copy = (ret > 0);
|
||||
trace_commit_one_iteration(s, offset, n, ret);
|
||||
|
@ -432,7 +432,7 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (opts->has_bitmap) {
|
||||
if (opts->bitmap) {
|
||||
bitmap = block_dirty_bitmap_lookup(opts->bitmap->node,
|
||||
opts->bitmap->name, NULL, errp);
|
||||
if (!bitmap) {
|
||||
@ -522,7 +522,6 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
|
||||
BlockCopyState **bcs,
|
||||
Error **errp)
|
||||
{
|
||||
ERRP_GUARD();
|
||||
BDRVCopyBeforeWriteState *state;
|
||||
BlockDriverState *top;
|
||||
QDict *opts;
|
||||
|
@ -37,9 +37,11 @@
|
||||
* the I/O API.
|
||||
*/
|
||||
|
||||
int coroutine_fn bdrv_co_check(BlockDriverState *bs,
|
||||
BdrvCheckResult *res, BdrvCheckMode fix);
|
||||
int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp);
|
||||
int coroutine_fn GRAPH_RDLOCK
|
||||
bdrv_co_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);
|
||||
|
||||
int coroutine_fn GRAPH_RDLOCK
|
||||
bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp);
|
||||
|
||||
int coroutine_fn
|
||||
bdrv_co_common_block_status_above(BlockDriverState *bs,
|
||||
@ -53,10 +55,11 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
|
||||
BlockDriverState **file,
|
||||
int *depth);
|
||||
|
||||
int coroutine_fn bdrv_co_readv_vmstate(BlockDriverState *bs,
|
||||
QEMUIOVector *qiov, int64_t pos);
|
||||
int coroutine_fn bdrv_co_writev_vmstate(BlockDriverState *bs,
|
||||
QEMUIOVector *qiov, int64_t pos);
|
||||
int coroutine_fn GRAPH_RDLOCK
|
||||
bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
|
||||
|
||||
int coroutine_fn GRAPH_RDLOCK
|
||||
bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
|
||||
|
||||
int coroutine_fn
|
||||
nbd_co_do_establish_connection(BlockDriverState *bs, bool blocking,
|
||||
@ -71,7 +74,7 @@ nbd_co_do_establish_connection(BlockDriverState *bs, bool blocking,
|
||||
* the "I/O or GS" API.
|
||||
*/
|
||||
|
||||
int generated_co_wrapper
|
||||
int co_wrapper_mixed_bdrv_rdlock
|
||||
bdrv_common_block_status_above(BlockDriverState *bs,
|
||||
BlockDriverState *base,
|
||||
bool include_base,
|
||||
@ -82,7 +85,7 @@ bdrv_common_block_status_above(BlockDriverState *bs,
|
||||
int64_t *map,
|
||||
BlockDriverState **file,
|
||||
int *depth);
|
||||
int generated_co_wrapper
|
||||
int co_wrapper_mixed
|
||||
nbd_do_establish_connection(BlockDriverState *bs, bool blocking, Error **errp);
|
||||
|
||||
#endif /* BLOCK_COROUTINES_H */
|
||||
|
@ -703,7 +703,7 @@ static int coroutine_fn block_crypto_co_create_opts_luks(BlockDriver *drv,
|
||||
}
|
||||
|
||||
/* Create protocol layer */
|
||||
ret = bdrv_create_file(filename, opts, errp);
|
||||
ret = bdrv_co_create_file(filename, opts, errp);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
|
@ -388,7 +388,7 @@ void bdrv_release_named_dirty_bitmaps(BlockDriverState *bs)
|
||||
* not fail.
|
||||
* This function doesn't release corresponding BdrvDirtyBitmap.
|
||||
*/
|
||||
static int coroutine_fn
|
||||
int coroutine_fn
|
||||
bdrv_co_remove_persistent_dirty_bitmap(BlockDriverState *bs, const char *name,
|
||||
Error **errp)
|
||||
{
|
||||
@ -399,45 +399,6 @@ bdrv_co_remove_persistent_dirty_bitmap(BlockDriverState *bs, const char *name,
|
||||
return 0;
|
||||
}
|
||||
|
||||
typedef struct BdrvRemovePersistentDirtyBitmapCo {
|
||||
BlockDriverState *bs;
|
||||
const char *name;
|
||||
Error **errp;
|
||||
int ret;
|
||||
} BdrvRemovePersistentDirtyBitmapCo;
|
||||
|
||||
static void coroutine_fn
|
||||
bdrv_co_remove_persistent_dirty_bitmap_entry(void *opaque)
|
||||
{
|
||||
BdrvRemovePersistentDirtyBitmapCo *s = opaque;
|
||||
|
||||
s->ret = bdrv_co_remove_persistent_dirty_bitmap(s->bs, s->name, s->errp);
|
||||
aio_wait_kick();
|
||||
}
|
||||
|
||||
int bdrv_remove_persistent_dirty_bitmap(BlockDriverState *bs, const char *name,
|
||||
Error **errp)
|
||||
{
|
||||
if (qemu_in_coroutine()) {
|
||||
return bdrv_co_remove_persistent_dirty_bitmap(bs, name, errp);
|
||||
} else {
|
||||
Coroutine *co;
|
||||
BdrvRemovePersistentDirtyBitmapCo s = {
|
||||
.bs = bs,
|
||||
.name = name,
|
||||
.errp = errp,
|
||||
.ret = -EINPROGRESS,
|
||||
};
|
||||
|
||||
co = qemu_coroutine_create(bdrv_co_remove_persistent_dirty_bitmap_entry,
|
||||
&s);
|
||||
bdrv_coroutine_enter(bs, co);
|
||||
BDRV_POLL_WHILE(bs, s.ret == -EINPROGRESS);
|
||||
|
||||
return s.ret;
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
bdrv_supports_persistent_dirty_bitmap(BlockDriverState *bs)
|
||||
{
|
||||
@ -447,7 +408,7 @@ bdrv_supports_persistent_dirty_bitmap(BlockDriverState *bs)
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool coroutine_fn
|
||||
bool coroutine_fn
|
||||
bdrv_co_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
|
||||
uint32_t granularity, Error **errp)
|
||||
{
|
||||
@ -470,51 +431,6 @@ bdrv_co_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
|
||||
return drv->bdrv_co_can_store_new_dirty_bitmap(bs, name, granularity, errp);
|
||||
}
|
||||
|
||||
typedef struct BdrvCanStoreNewDirtyBitmapCo {
|
||||
BlockDriverState *bs;
|
||||
const char *name;
|
||||
uint32_t granularity;
|
||||
Error **errp;
|
||||
bool ret;
|
||||
|
||||
bool in_progress;
|
||||
} BdrvCanStoreNewDirtyBitmapCo;
|
||||
|
||||
static void coroutine_fn bdrv_co_can_store_new_dirty_bitmap_entry(void *opaque)
|
||||
{
|
||||
BdrvCanStoreNewDirtyBitmapCo *s = opaque;
|
||||
|
||||
s->ret = bdrv_co_can_store_new_dirty_bitmap(s->bs, s->name, s->granularity,
|
||||
s->errp);
|
||||
s->in_progress = false;
|
||||
aio_wait_kick();
|
||||
}
|
||||
|
||||
bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
|
||||
uint32_t granularity, Error **errp)
|
||||
{
|
||||
IO_CODE();
|
||||
if (qemu_in_coroutine()) {
|
||||
return bdrv_co_can_store_new_dirty_bitmap(bs, name, granularity, errp);
|
||||
} else {
|
||||
Coroutine *co;
|
||||
BdrvCanStoreNewDirtyBitmapCo s = {
|
||||
.bs = bs,
|
||||
.name = name,
|
||||
.granularity = granularity,
|
||||
.errp = errp,
|
||||
.in_progress = true,
|
||||
};
|
||||
|
||||
co = qemu_coroutine_create(bdrv_co_can_store_new_dirty_bitmap_entry,
|
||||
&s);
|
||||
bdrv_coroutine_enter(bs, co);
|
||||
BDRV_POLL_WHILE(bs, s.in_progress);
|
||||
|
||||
return s.ret;
|
||||
}
|
||||
}
|
||||
|
||||
void bdrv_disable_dirty_bitmap(BdrvDirtyBitmap *bitmap)
|
||||
{
|
||||
bdrv_dirty_bitmaps_lock(bitmap->bs);
|
||||
@ -541,7 +457,6 @@ BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
|
||||
|
||||
info->count = bdrv_get_dirty_count(bm);
|
||||
info->granularity = bdrv_dirty_bitmap_granularity(bm);
|
||||
info->has_name = !!bm->name;
|
||||
info->name = g_strdup(bm->name);
|
||||
info->recording = bdrv_dirty_bitmap_recording(bm);
|
||||
info->busy = bdrv_dirty_bitmap_busy(bm);
|
||||
|
@ -114,7 +114,7 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
|
||||
ctx = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(ctx);
|
||||
|
||||
if (export->has_iothread) {
|
||||
if (export->iothread) {
|
||||
IOThread *iothread;
|
||||
AioContext *new_ctx;
|
||||
Error **set_context_errp;
|
||||
|
@ -265,8 +265,7 @@ static int vduse_blk_exp_create(BlockExport *exp, BlockExportOptions *opts,
|
||||
}
|
||||
vblk_exp->num_queues = num_queues;
|
||||
vblk_exp->handler.blk = exp->blk;
|
||||
vblk_exp->handler.serial = g_strdup(vblk_opts->has_serial ?
|
||||
vblk_opts->serial : "");
|
||||
vblk_exp->handler.serial = g_strdup(vblk_opts->serial ?: "");
|
||||
vblk_exp->handler.logical_block_size = logical_block_size;
|
||||
vblk_exp->handler.writable = opts->writable;
|
||||
|
||||
|
@ -1229,9 +1229,7 @@ static int hdev_get_max_segments(int fd, struct stat *st)
|
||||
ret = -errno;
|
||||
goto out;
|
||||
}
|
||||
do {
|
||||
ret = read(sysfd, buf, sizeof(buf) - 1);
|
||||
} while (ret == -1 && errno == EINTR);
|
||||
ret = RETRY_ON_EINTR(read(sysfd, buf, sizeof(buf) - 1));
|
||||
if (ret < 0) {
|
||||
ret = -errno;
|
||||
goto out;
|
||||
@ -1379,9 +1377,9 @@ static int handle_aiocb_ioctl(void *opaque)
|
||||
RawPosixAIOData *aiocb = opaque;
|
||||
int ret;
|
||||
|
||||
do {
|
||||
ret = ioctl(aiocb->aio_fildes, aiocb->ioctl.cmd, aiocb->ioctl.buf);
|
||||
} while (ret == -1 && errno == EINTR);
|
||||
ret = RETRY_ON_EINTR(
|
||||
ioctl(aiocb->aio_fildes, aiocb->ioctl.cmd, aiocb->ioctl.buf)
|
||||
);
|
||||
if (ret == -1) {
|
||||
return -errno;
|
||||
}
|
||||
@ -1463,18 +1461,17 @@ static ssize_t handle_aiocb_rw_vector(RawPosixAIOData *aiocb)
|
||||
{
|
||||
ssize_t len;
|
||||
|
||||
do {
|
||||
if (aiocb->aio_type & QEMU_AIO_WRITE)
|
||||
len = qemu_pwritev(aiocb->aio_fildes,
|
||||
len = RETRY_ON_EINTR(
|
||||
(aiocb->aio_type & QEMU_AIO_WRITE) ?
|
||||
qemu_pwritev(aiocb->aio_fildes,
|
||||
aiocb->io.iov,
|
||||
aiocb->io.niov,
|
||||
aiocb->aio_offset);
|
||||
else
|
||||
len = qemu_preadv(aiocb->aio_fildes,
|
||||
aiocb->aio_offset) :
|
||||
qemu_preadv(aiocb->aio_fildes,
|
||||
aiocb->io.iov,
|
||||
aiocb->io.niov,
|
||||
aiocb->aio_offset);
|
||||
} while (len == -1 && errno == EINTR);
|
||||
aiocb->aio_offset)
|
||||
);
|
||||
|
||||
if (len == -1) {
|
||||
return -errno;
|
||||
@ -1899,9 +1896,7 @@ static int allocate_first_block(int fd, size_t max_size)
|
||||
buf = qemu_memalign(max_align, write_size);
|
||||
memset(buf, 0, write_size);
|
||||
|
||||
do {
|
||||
n = pwrite(fd, buf, write_size, 0);
|
||||
} while (n == -1 && errno == EINTR);
|
||||
n = RETRY_ON_EINTR(pwrite(fd, buf, write_size, 0));
|
||||
|
||||
ret = (n == -1) ? -errno : 0;
|
||||
|
||||
|
@ -830,7 +830,6 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
|
||||
s->logfile = g_strdup(logfile ? logfile : GLUSTER_LOGFILE_DEFAULT);
|
||||
|
||||
gconf->logfile = g_strdup(s->logfile);
|
||||
gconf->has_logfile = true;
|
||||
|
||||
s->glfs = qemu_gluster_init(gconf, filename, options, errp);
|
||||
if (!s->glfs) {
|
||||
@ -917,7 +916,6 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
|
||||
gconf->debug = s->debug;
|
||||
gconf->has_debug = true;
|
||||
gconf->logfile = g_strdup(s->logfile);
|
||||
gconf->has_logfile = true;
|
||||
|
||||
/*
|
||||
* If 'state->bs->exact_filename' is empty, 'state->options' should contain
|
||||
@ -1162,7 +1160,6 @@ static int coroutine_fn qemu_gluster_co_create_opts(BlockDriver *drv,
|
||||
if (!gconf->logfile) {
|
||||
gconf->logfile = g_strdup(GLUSTER_LOGFILE_DEFAULT);
|
||||
}
|
||||
gconf->has_logfile = true;
|
||||
|
||||
ret = qemu_gluster_parse(gconf, filename, NULL, errp);
|
||||
if (ret < 0) {
|
||||
|
275
block/graph-lock.c
Normal file
275
block/graph-lock.c
Normal file
@ -0,0 +1,275 @@
|
||||
/*
|
||||
* Graph lock: rwlock to protect block layer graph manipulations (add/remove
|
||||
* edges and nodes)
|
||||
*
|
||||
* Copyright (c) 2022 Red Hat
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "block/graph-lock.h"
|
||||
#include "block/block.h"
|
||||
#include "block/block_int.h"
|
||||
|
||||
/* Dummy lock object to use for Thread Safety Analysis (TSA) */
|
||||
BdrvGraphLock graph_lock;
|
||||
|
||||
/* Protects the list of aiocontext and orphaned_reader_count */
|
||||
static QemuMutex aio_context_list_lock;
|
||||
|
||||
/* Written and read with atomic operations. */
|
||||
static int has_writer;
|
||||
|
||||
/*
|
||||
* A reader coroutine could move from an AioContext to another.
|
||||
* If this happens, there is no problem from the point of view of
|
||||
* counters. The problem is that the total count becomes
|
||||
* unbalanced if one of the two AioContexts gets deleted.
|
||||
* The count of readers must remain correct, so the AioContext's
|
||||
* balance is transferred to this glboal variable.
|
||||
* Protected by aio_context_list_lock.
|
||||
*/
|
||||
static uint32_t orphaned_reader_count;
|
||||
|
||||
/* Queue of readers waiting for the writer to finish */
|
||||
static CoQueue reader_queue;
|
||||
|
||||
struct BdrvGraphRWlock {
|
||||
/* How many readers are currently reading the graph. */
|
||||
uint32_t reader_count;
|
||||
|
||||
/*
|
||||
* List of BdrvGraphRWlock kept in graph-lock.c
|
||||
* Protected by aio_context_list_lock
|
||||
*/
|
||||
QTAILQ_ENTRY(BdrvGraphRWlock) next_aio;
|
||||
};
|
||||
|
||||
/*
|
||||
* List of BdrvGraphRWlock. This list ensures that each BdrvGraphRWlock
|
||||
* can safely modify only its own counter, avoid reading/writing
|
||||
* others and thus improving performances by avoiding cacheline bounces.
|
||||
*/
|
||||
static QTAILQ_HEAD(, BdrvGraphRWlock) aio_context_list =
|
||||
QTAILQ_HEAD_INITIALIZER(aio_context_list);
|
||||
|
||||
static void __attribute__((__constructor__)) bdrv_init_graph_lock(void)
|
||||
{
|
||||
qemu_mutex_init(&aio_context_list_lock);
|
||||
qemu_co_queue_init(&reader_queue);
|
||||
}
|
||||
|
||||
void register_aiocontext(AioContext *ctx)
|
||||
{
|
||||
ctx->bdrv_graph = g_new0(BdrvGraphRWlock, 1);
|
||||
QEMU_LOCK_GUARD(&aio_context_list_lock);
|
||||
assert(ctx->bdrv_graph->reader_count == 0);
|
||||
QTAILQ_INSERT_TAIL(&aio_context_list, ctx->bdrv_graph, next_aio);
|
||||
}
|
||||
|
||||
void unregister_aiocontext(AioContext *ctx)
|
||||
{
|
||||
QEMU_LOCK_GUARD(&aio_context_list_lock);
|
||||
orphaned_reader_count += ctx->bdrv_graph->reader_count;
|
||||
QTAILQ_REMOVE(&aio_context_list, ctx->bdrv_graph, next_aio);
|
||||
g_free(ctx->bdrv_graph);
|
||||
}
|
||||
|
||||
static uint32_t reader_count(void)
|
||||
{
|
||||
BdrvGraphRWlock *brdv_graph;
|
||||
uint32_t rd;
|
||||
|
||||
QEMU_LOCK_GUARD(&aio_context_list_lock);
|
||||
|
||||
/* rd can temporarly be negative, but the total will *always* be >= 0 */
|
||||
rd = orphaned_reader_count;
|
||||
QTAILQ_FOREACH(brdv_graph, &aio_context_list, next_aio) {
|
||||
rd += qatomic_read(&brdv_graph->reader_count);
|
||||
}
|
||||
|
||||
/* shouldn't overflow unless there are 2^31 readers */
|
||||
assert((int32_t)rd >= 0);
|
||||
return rd;
|
||||
}
|
||||
|
||||
void bdrv_graph_wrlock(void)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
assert(!qatomic_read(&has_writer));
|
||||
|
||||
/* Make sure that constantly arriving new I/O doesn't cause starvation */
|
||||
bdrv_drain_all_begin_nopoll();
|
||||
|
||||
/*
|
||||
* reader_count == 0: this means writer will read has_reader as 1
|
||||
* reader_count >= 1: we don't know if writer read has_writer == 0 or 1,
|
||||
* but we need to wait.
|
||||
* Wait by allowing other coroutine (and possible readers) to continue.
|
||||
*/
|
||||
do {
|
||||
/*
|
||||
* has_writer must be 0 while polling, otherwise we get a deadlock if
|
||||
* any callback involved during AIO_WAIT_WHILE() tries to acquire the
|
||||
* reader lock.
|
||||
*/
|
||||
qatomic_set(&has_writer, 0);
|
||||
AIO_WAIT_WHILE(qemu_get_aio_context(), reader_count() >= 1);
|
||||
qatomic_set(&has_writer, 1);
|
||||
|
||||
/*
|
||||
* We want to only check reader_count() after has_writer = 1 is visible
|
||||
* to other threads. That way no more readers can sneak in after we've
|
||||
* determined reader_count() == 0.
|
||||
*/
|
||||
smp_mb();
|
||||
} while (reader_count() >= 1);
|
||||
|
||||
bdrv_drain_all_end();
|
||||
}
|
||||
|
||||
void bdrv_graph_wrunlock(void)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
QEMU_LOCK_GUARD(&aio_context_list_lock);
|
||||
assert(qatomic_read(&has_writer));
|
||||
|
||||
/*
|
||||
* No need for memory barriers, this works in pair with
|
||||
* the slow path of rdlock() and both take the lock.
|
||||
*/
|
||||
qatomic_store_release(&has_writer, 0);
|
||||
|
||||
/* Wake up all coroutine that are waiting to read the graph */
|
||||
qemu_co_enter_all(&reader_queue, &aio_context_list_lock);
|
||||
}
|
||||
|
||||
void coroutine_fn bdrv_graph_co_rdlock(void)
|
||||
{
|
||||
BdrvGraphRWlock *bdrv_graph;
|
||||
bdrv_graph = qemu_get_current_aio_context()->bdrv_graph;
|
||||
|
||||
/* Do not lock if in main thread */
|
||||
if (qemu_in_main_thread()) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (;;) {
|
||||
qatomic_set(&bdrv_graph->reader_count,
|
||||
bdrv_graph->reader_count + 1);
|
||||
/* make sure writer sees reader_count before we check has_writer */
|
||||
smp_mb();
|
||||
|
||||
/*
|
||||
* has_writer == 0: this means writer will read reader_count as >= 1
|
||||
* has_writer == 1: we don't know if writer read reader_count == 0
|
||||
* or > 0, but we need to wait anyways because
|
||||
* it will write.
|
||||
*/
|
||||
if (!qatomic_read(&has_writer)) {
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Synchronize access with reader_count() in bdrv_graph_wrlock().
|
||||
* Case 1:
|
||||
* If this critical section gets executed first, reader_count will
|
||||
* decrease and the reader will go to sleep.
|
||||
* Then the writer will read reader_count that does not take into
|
||||
* account this reader, and if there's no other reader it will
|
||||
* enter the write section.
|
||||
* Case 2:
|
||||
* If reader_count() critical section gets executed first,
|
||||
* then writer will read reader_count >= 1.
|
||||
* It will wait in AIO_WAIT_WHILE(), but once it releases the lock
|
||||
* we will enter this critical section and call aio_wait_kick().
|
||||
*/
|
||||
WITH_QEMU_LOCK_GUARD(&aio_context_list_lock) {
|
||||
/*
|
||||
* Additional check when we use the above lock to synchronize
|
||||
* with bdrv_graph_wrunlock().
|
||||
* Case 1:
|
||||
* If this gets executed first, has_writer is still 1, so we reduce
|
||||
* reader_count and go to sleep.
|
||||
* Then the writer will set has_writer to 0 and wake up all readers,
|
||||
* us included.
|
||||
* Case 2:
|
||||
* If bdrv_graph_wrunlock() critical section gets executed first,
|
||||
* then it will set has_writer to 0 and wake up all other readers.
|
||||
* Then we execute this critical section, and therefore must check
|
||||
* again for has_writer, otherwise we sleep without any writer
|
||||
* actually running.
|
||||
*/
|
||||
if (!qatomic_read(&has_writer)) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* slow path where reader sleeps */
|
||||
bdrv_graph->reader_count--;
|
||||
aio_wait_kick();
|
||||
qemu_co_queue_wait(&reader_queue, &aio_context_list_lock);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void coroutine_fn bdrv_graph_co_rdunlock(void)
|
||||
{
|
||||
BdrvGraphRWlock *bdrv_graph;
|
||||
bdrv_graph = qemu_get_current_aio_context()->bdrv_graph;
|
||||
|
||||
/* Do not lock if in main thread */
|
||||
if (qemu_in_main_thread()) {
|
||||
return;
|
||||
}
|
||||
|
||||
qatomic_store_release(&bdrv_graph->reader_count,
|
||||
bdrv_graph->reader_count - 1);
|
||||
/* make sure writer sees reader_count before we check has_writer */
|
||||
smp_mb();
|
||||
|
||||
/*
|
||||
* has_writer == 0: this means reader will read reader_count decreased
|
||||
* has_writer == 1: we don't know if writer read reader_count old or
|
||||
* new. Therefore, kick again so on next iteration
|
||||
* writer will for sure read the updated value.
|
||||
*/
|
||||
if (qatomic_read(&has_writer)) {
|
||||
aio_wait_kick();
|
||||
}
|
||||
}
|
||||
|
||||
void bdrv_graph_rdlock_main_loop(void)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
assert(!qemu_in_coroutine());
|
||||
}
|
||||
|
||||
void bdrv_graph_rdunlock_main_loop(void)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
assert(!qemu_in_coroutine());
|
||||
}
|
||||
|
||||
void assert_bdrv_graph_readable(void)
|
||||
{
|
||||
assert(qemu_in_main_thread() || reader_count());
|
||||
}
|
||||
|
||||
void assert_bdrv_graph_writable(void)
|
||||
{
|
||||
assert(qemu_in_main_thread());
|
||||
assert(qatomic_read(&has_writer));
|
||||
}
|
380
block/io.c
380
block/io.c
@ -45,53 +45,43 @@ static void bdrv_parent_cb_resize(BlockDriverState *bs);
|
||||
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
|
||||
int64_t offset, int64_t bytes, BdrvRequestFlags flags);
|
||||
|
||||
static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
|
||||
bool ignore_bds_parents)
|
||||
static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
|
||||
{
|
||||
BdrvChild *c, *next;
|
||||
|
||||
QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
|
||||
if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
|
||||
if (c == ignore) {
|
||||
continue;
|
||||
}
|
||||
bdrv_parent_drained_begin_single(c, false);
|
||||
}
|
||||
}
|
||||
|
||||
static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
|
||||
int *drained_end_counter)
|
||||
{
|
||||
assert(c->parent_quiesce_counter > 0);
|
||||
c->parent_quiesce_counter--;
|
||||
if (c->klass->drained_end) {
|
||||
c->klass->drained_end(c, drained_end_counter);
|
||||
bdrv_parent_drained_begin_single(c);
|
||||
}
|
||||
}
|
||||
|
||||
void bdrv_parent_drained_end_single(BdrvChild *c)
|
||||
{
|
||||
int drained_end_counter = 0;
|
||||
AioContext *ctx = bdrv_child_get_parent_aio_context(c);
|
||||
IO_OR_GS_CODE();
|
||||
bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
|
||||
AIO_WAIT_WHILE(ctx, qatomic_read(&drained_end_counter) > 0);
|
||||
|
||||
assert(c->quiesced_parent);
|
||||
c->quiesced_parent = false;
|
||||
|
||||
if (c->klass->drained_end) {
|
||||
c->klass->drained_end(c);
|
||||
}
|
||||
}
|
||||
|
||||
static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
|
||||
bool ignore_bds_parents,
|
||||
int *drained_end_counter)
|
||||
static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
|
||||
{
|
||||
BdrvChild *c;
|
||||
|
||||
QLIST_FOREACH(c, &bs->parents, next_parent) {
|
||||
if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
|
||||
if (c == ignore) {
|
||||
continue;
|
||||
}
|
||||
bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
|
||||
bdrv_parent_drained_end_single(c);
|
||||
}
|
||||
}
|
||||
|
||||
static bool bdrv_parent_drained_poll_single(BdrvChild *c)
|
||||
bool bdrv_parent_drained_poll_single(BdrvChild *c)
|
||||
{
|
||||
if (c->klass->drained_poll) {
|
||||
return c->klass->drained_poll(c);
|
||||
@ -115,17 +105,16 @@ static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
|
||||
return busy;
|
||||
}
|
||||
|
||||
void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
|
||||
void bdrv_parent_drained_begin_single(BdrvChild *c)
|
||||
{
|
||||
AioContext *ctx = bdrv_child_get_parent_aio_context(c);
|
||||
IO_OR_GS_CODE();
|
||||
c->parent_quiesce_counter++;
|
||||
|
||||
assert(!c->quiesced_parent);
|
||||
c->quiesced_parent = true;
|
||||
|
||||
if (c->klass->drained_begin) {
|
||||
c->klass->drained_begin(c);
|
||||
}
|
||||
if (poll) {
|
||||
AIO_WAIT_WHILE(ctx, bdrv_parent_drained_poll_single(c));
|
||||
}
|
||||
}
|
||||
|
||||
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
|
||||
@ -245,69 +234,14 @@ typedef struct {
|
||||
BlockDriverState *bs;
|
||||
bool done;
|
||||
bool begin;
|
||||
bool recursive;
|
||||
bool poll;
|
||||
BdrvChild *parent;
|
||||
bool ignore_bds_parents;
|
||||
int *drained_end_counter;
|
||||
} BdrvCoDrainData;
|
||||
|
||||
static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
|
||||
{
|
||||
BdrvCoDrainData *data = opaque;
|
||||
BlockDriverState *bs = data->bs;
|
||||
|
||||
if (data->begin) {
|
||||
bs->drv->bdrv_co_drain_begin(bs);
|
||||
} else {
|
||||
bs->drv->bdrv_co_drain_end(bs);
|
||||
}
|
||||
|
||||
/* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
|
||||
qatomic_mb_set(&data->done, true);
|
||||
if (!data->begin) {
|
||||
qatomic_dec(data->drained_end_counter);
|
||||
}
|
||||
bdrv_dec_in_flight(bs);
|
||||
|
||||
g_free(data);
|
||||
}
|
||||
|
||||
/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
|
||||
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
|
||||
int *drained_end_counter)
|
||||
{
|
||||
BdrvCoDrainData *data;
|
||||
|
||||
if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
|
||||
(!begin && !bs->drv->bdrv_co_drain_end)) {
|
||||
return;
|
||||
}
|
||||
|
||||
data = g_new(BdrvCoDrainData, 1);
|
||||
*data = (BdrvCoDrainData) {
|
||||
.bs = bs,
|
||||
.done = false,
|
||||
.begin = begin,
|
||||
.drained_end_counter = drained_end_counter,
|
||||
};
|
||||
|
||||
if (!begin) {
|
||||
qatomic_inc(drained_end_counter);
|
||||
}
|
||||
|
||||
/* Make sure the driver callback completes during the polling phase for
|
||||
* drain_begin. */
|
||||
bdrv_inc_in_flight(bs);
|
||||
data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
|
||||
aio_co_schedule(bdrv_get_aio_context(bs), data->co);
|
||||
}
|
||||
|
||||
/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
|
||||
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
|
||||
BdrvChild *ignore_parent, bool ignore_bds_parents)
|
||||
bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
|
||||
bool ignore_bds_parents)
|
||||
{
|
||||
BdrvChild *child, *next;
|
||||
IO_OR_GS_CODE();
|
||||
|
||||
if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
|
||||
@ -318,30 +252,18 @@ bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
|
||||
return true;
|
||||
}
|
||||
|
||||
if (recursive) {
|
||||
assert(!ignore_bds_parents);
|
||||
QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
|
||||
if (bdrv_drain_poll(child->bs, recursive, child, false)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
|
||||
static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
|
||||
BdrvChild *ignore_parent)
|
||||
{
|
||||
return bdrv_drain_poll(bs, recursive, ignore_parent, false);
|
||||
return bdrv_drain_poll(bs, ignore_parent, false);
|
||||
}
|
||||
|
||||
static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
|
||||
BdrvChild *parent, bool ignore_bds_parents,
|
||||
static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
|
||||
bool poll);
|
||||
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
|
||||
BdrvChild *parent, bool ignore_bds_parents,
|
||||
int *drained_end_counter);
|
||||
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);
|
||||
|
||||
static void bdrv_co_drain_bh_cb(void *opaque)
|
||||
{
|
||||
@ -354,14 +276,10 @@ static void bdrv_co_drain_bh_cb(void *opaque)
|
||||
aio_context_acquire(ctx);
|
||||
bdrv_dec_in_flight(bs);
|
||||
if (data->begin) {
|
||||
assert(!data->drained_end_counter);
|
||||
bdrv_do_drained_begin(bs, data->recursive, data->parent,
|
||||
data->ignore_bds_parents, data->poll);
|
||||
bdrv_do_drained_begin(bs, data->parent, data->poll);
|
||||
} else {
|
||||
assert(!data->poll);
|
||||
bdrv_do_drained_end(bs, data->recursive, data->parent,
|
||||
data->ignore_bds_parents,
|
||||
data->drained_end_counter);
|
||||
bdrv_do_drained_end(bs, data->parent);
|
||||
}
|
||||
aio_context_release(ctx);
|
||||
} else {
|
||||
@ -374,11 +292,9 @@ static void bdrv_co_drain_bh_cb(void *opaque)
|
||||
}
|
||||
|
||||
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
|
||||
bool begin, bool recursive,
|
||||
bool begin,
|
||||
BdrvChild *parent,
|
||||
bool ignore_bds_parents,
|
||||
bool poll,
|
||||
int *drained_end_counter)
|
||||
bool poll)
|
||||
{
|
||||
BdrvCoDrainData data;
|
||||
Coroutine *self = qemu_coroutine_self();
|
||||
@ -394,11 +310,8 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
|
||||
.bs = bs,
|
||||
.done = false,
|
||||
.begin = begin,
|
||||
.recursive = recursive,
|
||||
.parent = parent,
|
||||
.ignore_bds_parents = ignore_bds_parents,
|
||||
.poll = poll,
|
||||
.drained_end_counter = drained_end_counter,
|
||||
};
|
||||
|
||||
if (bs) {
|
||||
@ -429,41 +342,22 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
|
||||
}
|
||||
}
|
||||
|
||||
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
|
||||
BdrvChild *parent, bool ignore_bds_parents)
|
||||
static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
|
||||
bool poll)
|
||||
{
|
||||
IO_OR_GS_CODE();
|
||||
assert(!qemu_in_coroutine());
|
||||
|
||||
if (qemu_in_coroutine()) {
|
||||
bdrv_co_yield_to_drain(bs, true, parent, poll);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Stop things in parent-to-child order */
|
||||
if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
|
||||
aio_disable_external(bdrv_get_aio_context(bs));
|
||||
}
|
||||
|
||||
bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
|
||||
bdrv_drain_invoke(bs, true, NULL);
|
||||
}
|
||||
|
||||
static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
|
||||
BdrvChild *parent, bool ignore_bds_parents,
|
||||
bool poll)
|
||||
{
|
||||
BdrvChild *child, *next;
|
||||
|
||||
if (qemu_in_coroutine()) {
|
||||
bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
|
||||
poll, NULL);
|
||||
return;
|
||||
}
|
||||
|
||||
bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);
|
||||
|
||||
if (recursive) {
|
||||
assert(!ignore_bds_parents);
|
||||
bs->recursive_quiesce_counter++;
|
||||
QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
|
||||
bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
|
||||
false);
|
||||
bdrv_parent_drained_begin(bs, parent);
|
||||
if (bs->drv && bs->drv->bdrv_drain_begin) {
|
||||
bs->drv->bdrv_drain_begin(bs);
|
||||
}
|
||||
}
|
||||
|
||||
@ -477,117 +371,50 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
|
||||
* nodes.
|
||||
*/
|
||||
if (poll) {
|
||||
assert(!ignore_bds_parents);
|
||||
BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
|
||||
BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
|
||||
}
|
||||
}
|
||||
|
||||
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
|
||||
{
|
||||
bdrv_do_drained_begin(bs, parent, false);
|
||||
}
|
||||
|
||||
void bdrv_drained_begin(BlockDriverState *bs)
|
||||
{
|
||||
IO_OR_GS_CODE();
|
||||
bdrv_do_drained_begin(bs, false, NULL, false, true);
|
||||
}
|
||||
|
||||
void bdrv_subtree_drained_begin(BlockDriverState *bs)
|
||||
{
|
||||
IO_OR_GS_CODE();
|
||||
bdrv_do_drained_begin(bs, true, NULL, false, true);
|
||||
bdrv_do_drained_begin(bs, NULL, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* This function does not poll, nor must any of its recursively called
|
||||
* functions. The *drained_end_counter pointee will be incremented
|
||||
* once for every background operation scheduled, and decremented once
|
||||
* the operation settles. Therefore, the pointer must remain valid
|
||||
* until the pointee reaches 0. That implies that whoever sets up the
|
||||
* pointee has to poll until it is 0.
|
||||
*
|
||||
* We use atomic operations to access *drained_end_counter, because
|
||||
* (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
|
||||
* @bs may contain nodes in different AioContexts,
|
||||
* (2) bdrv_drain_all_end() uses the same counter for all nodes,
|
||||
* regardless of which AioContext they are in.
|
||||
* functions.
|
||||
*/
|
||||
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
|
||||
BdrvChild *parent, bool ignore_bds_parents,
|
||||
int *drained_end_counter)
|
||||
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
|
||||
{
|
||||
BdrvChild *child;
|
||||
int old_quiesce_counter;
|
||||
|
||||
assert(drained_end_counter != NULL);
|
||||
|
||||
if (qemu_in_coroutine()) {
|
||||
bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
|
||||
false, drained_end_counter);
|
||||
bdrv_co_yield_to_drain(bs, false, parent, false);
|
||||
return;
|
||||
}
|
||||
assert(bs->quiesce_counter > 0);
|
||||
|
||||
/* Re-enable things in child-to-parent order */
|
||||
bdrv_drain_invoke(bs, false, drained_end_counter);
|
||||
bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
|
||||
drained_end_counter);
|
||||
|
||||
old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
|
||||
if (old_quiesce_counter == 1) {
|
||||
if (bs->drv && bs->drv->bdrv_drain_end) {
|
||||
bs->drv->bdrv_drain_end(bs);
|
||||
}
|
||||
bdrv_parent_drained_end(bs, parent);
|
||||
aio_enable_external(bdrv_get_aio_context(bs));
|
||||
}
|
||||
|
||||
if (recursive) {
|
||||
assert(!ignore_bds_parents);
|
||||
bs->recursive_quiesce_counter--;
|
||||
QLIST_FOREACH(child, &bs->children, next) {
|
||||
bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
|
||||
drained_end_counter);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void bdrv_drained_end(BlockDriverState *bs)
|
||||
{
|
||||
int drained_end_counter = 0;
|
||||
IO_OR_GS_CODE();
|
||||
bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
|
||||
BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
|
||||
}
|
||||
|
||||
void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
|
||||
{
|
||||
IO_CODE();
|
||||
bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
|
||||
}
|
||||
|
||||
void bdrv_subtree_drained_end(BlockDriverState *bs)
|
||||
{
|
||||
int drained_end_counter = 0;
|
||||
IO_OR_GS_CODE();
|
||||
bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
|
||||
BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
|
||||
}
|
||||
|
||||
void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
|
||||
{
|
||||
int i;
|
||||
IO_OR_GS_CODE();
|
||||
|
||||
for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
|
||||
bdrv_do_drained_begin(child->bs, true, child, false, true);
|
||||
}
|
||||
}
|
||||
|
||||
void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
|
||||
{
|
||||
int drained_end_counter = 0;
|
||||
int i;
|
||||
IO_OR_GS_CODE();
|
||||
|
||||
for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
|
||||
bdrv_do_drained_end(child->bs, true, child, false,
|
||||
&drained_end_counter);
|
||||
}
|
||||
|
||||
BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0);
|
||||
bdrv_do_drained_end(bs, NULL);
|
||||
}
|
||||
|
||||
void bdrv_drain(BlockDriverState *bs)
|
||||
@ -620,7 +447,7 @@ static bool bdrv_drain_all_poll(void)
|
||||
while ((bs = bdrv_next_all_states(bs))) {
|
||||
AioContext *aio_context = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(aio_context);
|
||||
result |= bdrv_drain_poll(bs, false, NULL, true);
|
||||
result |= bdrv_drain_poll(bs, NULL, true);
|
||||
aio_context_release(aio_context);
|
||||
}
|
||||
|
||||
@ -639,16 +466,11 @@ static bool bdrv_drain_all_poll(void)
|
||||
* NOTE: no new block jobs or BlockDriverStates can be created between
|
||||
* the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
|
||||
*/
|
||||
void bdrv_drain_all_begin(void)
|
||||
void bdrv_drain_all_begin_nopoll(void)
|
||||
{
|
||||
BlockDriverState *bs = NULL;
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
if (qemu_in_coroutine()) {
|
||||
bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* bdrv queue is managed by record/replay,
|
||||
* waiting for finishing the I/O requests may
|
||||
@ -670,9 +492,30 @@ void bdrv_drain_all_begin(void)
|
||||
AioContext *aio_context = bdrv_get_aio_context(bs);
|
||||
|
||||
aio_context_acquire(aio_context);
|
||||
bdrv_do_drained_begin(bs, false, NULL, true, false);
|
||||
bdrv_do_drained_begin(bs, NULL, false);
|
||||
aio_context_release(aio_context);
|
||||
}
|
||||
}
|
||||
|
||||
void bdrv_drain_all_begin(void)
|
||||
{
|
||||
BlockDriverState *bs = NULL;
|
||||
|
||||
if (qemu_in_coroutine()) {
|
||||
bdrv_co_yield_to_drain(NULL, true, NULL, true);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* bdrv queue is managed by record/replay,
|
||||
* waiting for finishing the I/O requests may
|
||||
* be infinite
|
||||
*/
|
||||
if (replay_events_enabled()) {
|
||||
return;
|
||||
}
|
||||
|
||||
bdrv_drain_all_begin_nopoll();
|
||||
|
||||
/* Now poll the in-flight requests */
|
||||
AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());
|
||||
@ -684,22 +527,19 @@ void bdrv_drain_all_begin(void)
|
||||
|
||||
void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
|
||||
{
|
||||
int drained_end_counter = 0;
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
g_assert(bs->quiesce_counter > 0);
|
||||
g_assert(!bs->refcnt);
|
||||
|
||||
while (bs->quiesce_counter) {
|
||||
bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
|
||||
bdrv_do_drained_end(bs, NULL);
|
||||
}
|
||||
BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
|
||||
}
|
||||
|
||||
void bdrv_drain_all_end(void)
|
||||
{
|
||||
BlockDriverState *bs = NULL;
|
||||
int drained_end_counter = 0;
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
/*
|
||||
@ -715,13 +555,11 @@ void bdrv_drain_all_end(void)
|
||||
AioContext *aio_context = bdrv_get_aio_context(bs);
|
||||
|
||||
aio_context_acquire(aio_context);
|
||||
bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
|
||||
bdrv_do_drained_end(bs, NULL);
|
||||
aio_context_release(aio_context);
|
||||
}
|
||||
|
||||
assert(qemu_get_current_aio_context() == qemu_get_aio_context());
|
||||
AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0);
|
||||
|
||||
assert(bdrv_drain_all_count > 0);
|
||||
bdrv_drain_all_count--;
|
||||
}
|
||||
@ -2711,6 +2549,17 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
|
||||
return ret;
|
||||
}
|
||||
|
||||
int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
|
||||
BlockDriverState *base,
|
||||
int64_t offset, int64_t bytes,
|
||||
int64_t *pnum, int64_t *map,
|
||||
BlockDriverState **file)
|
||||
{
|
||||
IO_CODE();
|
||||
return bdrv_co_common_block_status_above(bs, base, false, true, offset,
|
||||
bytes, pnum, map, file, NULL);
|
||||
}
|
||||
|
||||
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
|
||||
int64_t offset, int64_t bytes, int64_t *pnum,
|
||||
int64_t *map, BlockDriverState **file)
|
||||
@ -2756,6 +2605,22 @@ int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
|
||||
return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
|
||||
}
|
||||
|
||||
int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
|
||||
int64_t bytes, int64_t *pnum)
|
||||
{
|
||||
int ret;
|
||||
int64_t dummy;
|
||||
IO_CODE();
|
||||
|
||||
ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset,
|
||||
bytes, pnum ? pnum : &dummy, NULL,
|
||||
NULL, NULL);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
return !!(ret & BDRV_BLOCK_ALLOCATED);
|
||||
}
|
||||
|
||||
int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
int64_t *pnum)
|
||||
{
|
||||
@ -2772,6 +2637,29 @@ int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
return !!(ret & BDRV_BLOCK_ALLOCATED);
|
||||
}
|
||||
|
||||
/* See bdrv_is_allocated_above for documentation */
|
||||
int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
|
||||
BlockDriverState *base,
|
||||
bool include_base, int64_t offset,
|
||||
int64_t bytes, int64_t *pnum)
|
||||
{
|
||||
int depth;
|
||||
int ret;
|
||||
IO_CODE();
|
||||
|
||||
ret = bdrv_co_common_block_status_above(top, base, include_base, false,
|
||||
offset, bytes, pnum, NULL, NULL,
|
||||
&depth);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (ret & BDRV_BLOCK_ALLOCATED) {
|
||||
return depth;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
|
||||
*
|
||||
@ -2795,10 +2683,12 @@ int bdrv_is_allocated_above(BlockDriverState *top,
|
||||
int64_t bytes, int64_t *pnum)
|
||||
{
|
||||
int depth;
|
||||
int ret = bdrv_common_block_status_above(top, base, include_base, false,
|
||||
int ret;
|
||||
IO_CODE();
|
||||
|
||||
ret = bdrv_common_block_status_above(top, base, include_base, false,
|
||||
offset, bytes, pnum, NULL, NULL,
|
||||
&depth);
|
||||
IO_CODE();
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
@ -2816,6 +2706,7 @@ bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
|
||||
BlockDriverState *child_bs = bdrv_primary_bs(bs);
|
||||
int ret;
|
||||
IO_CODE();
|
||||
assert_bdrv_graph_readable();
|
||||
|
||||
ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
|
||||
if (ret < 0) {
|
||||
@ -2848,6 +2739,7 @@ bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
|
||||
BlockDriverState *child_bs = bdrv_primary_bs(bs);
|
||||
int ret;
|
||||
IO_CODE();
|
||||
assert_bdrv_graph_readable();
|
||||
|
||||
ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
|
||||
if (ret < 0) {
|
||||
|
@ -10,6 +10,7 @@ block_ss.add(files(
|
||||
'blkverify.c',
|
||||
'block-backend.c',
|
||||
'block-copy.c',
|
||||
'graph-lock.c',
|
||||
'commit.c',
|
||||
'copy-on-read.c',
|
||||
'preallocate.c',
|
||||
@ -137,6 +138,7 @@ block_gen_c = custom_target('block-gen.c',
|
||||
output: 'block-gen.c',
|
||||
input: files(
|
||||
'../include/block/block-io.h',
|
||||
'../include/block/dirty-bitmap.h',
|
||||
'../include/block/block-global-state.h',
|
||||
'../include/sysemu/block-backend-io.h',
|
||||
'coroutines.h'
|
||||
|
@ -241,7 +241,6 @@ void hmp_drive_mirror(Monitor *mon, const QDict *qdict)
|
||||
DriveMirror mirror = {
|
||||
.device = (char *)qdict_get_str(qdict, "device"),
|
||||
.target = (char *)filename,
|
||||
.has_format = !!format,
|
||||
.format = (char *)format,
|
||||
.sync = full ? MIRROR_SYNC_MODE_FULL : MIRROR_SYNC_MODE_TOP,
|
||||
.has_mode = true,
|
||||
@ -270,7 +269,6 @@ void hmp_drive_backup(Monitor *mon, const QDict *qdict)
|
||||
DriveBackup backup = {
|
||||
.device = (char *)device,
|
||||
.target = (char *)filename,
|
||||
.has_format = !!format,
|
||||
.format = (char *)format,
|
||||
.sync = full ? MIRROR_SYNC_MODE_FULL : MIRROR_SYNC_MODE_TOP,
|
||||
.has_mode = true,
|
||||
@ -360,9 +358,7 @@ void hmp_snapshot_blkdev(Monitor *mon, const QDict *qdict)
|
||||
}
|
||||
|
||||
mode = reuse ? NEW_IMAGE_MODE_EXISTING : NEW_IMAGE_MODE_ABSOLUTE_PATHS;
|
||||
qmp_blockdev_snapshot_sync(true, device, false, NULL,
|
||||
filename, false, NULL,
|
||||
!!format, format,
|
||||
qmp_blockdev_snapshot_sync(device, NULL, filename, NULL, format,
|
||||
true, mode, &err);
|
||||
end:
|
||||
hmp_handle_error(mon, err);
|
||||
@ -385,8 +381,7 @@ void hmp_snapshot_delete_blkdev_internal(Monitor *mon, const QDict *qdict)
|
||||
const char *id = qdict_get_try_str(qdict, "id");
|
||||
Error *err = NULL;
|
||||
|
||||
qmp_blockdev_snapshot_delete_internal_sync(device, !!id, id,
|
||||
true, name, &err);
|
||||
qmp_blockdev_snapshot_delete_internal_sync(device, id, name, &err);
|
||||
hmp_handle_error(mon, err);
|
||||
}
|
||||
|
||||
@ -427,7 +422,7 @@ void hmp_nbd_server_start(Monitor *mon, const QDict *qdict)
|
||||
block_list = qmp_query_block(NULL);
|
||||
|
||||
for (info = block_list; info; info = info->next) {
|
||||
if (!info->value->has_inserted) {
|
||||
if (!info->value->inserted) {
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -460,7 +455,6 @@ void hmp_nbd_server_add(Monitor *mon, const QDict *qdict)
|
||||
|
||||
NbdServerAddOptions export = {
|
||||
.device = (char *) device,
|
||||
.has_name = !!name,
|
||||
.name = (char *) name,
|
||||
.has_writable = true,
|
||||
.writable = writable,
|
||||
@ -495,7 +489,7 @@ void coroutine_fn hmp_block_resize(Monitor *mon, const QDict *qdict)
|
||||
int64_t size = qdict_get_int(qdict, "size");
|
||||
Error *err = NULL;
|
||||
|
||||
qmp_block_resize(true, device, false, NULL, size, &err);
|
||||
qmp_block_resize(device, NULL, size, &err);
|
||||
hmp_handle_error(mon, err);
|
||||
}
|
||||
|
||||
@ -506,11 +500,10 @@ void hmp_block_stream(Monitor *mon, const QDict *qdict)
|
||||
const char *base = qdict_get_try_str(qdict, "base");
|
||||
int64_t speed = qdict_get_try_int(qdict, "speed", 0);
|
||||
|
||||
qmp_block_stream(true, device, device, base != NULL, base, false, NULL,
|
||||
false, NULL, false, NULL,
|
||||
qdict_haskey(qdict, "speed"), speed, true,
|
||||
BLOCKDEV_ON_ERROR_REPORT, false, NULL, false, false, false,
|
||||
false, &error);
|
||||
qmp_block_stream(device, device, base, NULL, NULL, NULL,
|
||||
qdict_haskey(qdict, "speed"), speed,
|
||||
true, BLOCKDEV_ON_ERROR_REPORT, NULL,
|
||||
false, false, false, false, &error);
|
||||
|
||||
hmp_handle_error(mon, error);
|
||||
}
|
||||
@ -534,10 +527,8 @@ void hmp_block_set_io_throttle(Monitor *mon, const QDict *qdict)
|
||||
* version has only one, so we must decide which one to pass.
|
||||
*/
|
||||
if (blk_by_name(device)) {
|
||||
throttle.has_device = true;
|
||||
throttle.device = device;
|
||||
} else {
|
||||
throttle.has_id = true;
|
||||
throttle.id = device;
|
||||
}
|
||||
|
||||
@ -551,7 +542,7 @@ void hmp_eject(Monitor *mon, const QDict *qdict)
|
||||
const char *device = qdict_get_str(qdict, "device");
|
||||
Error *err = NULL;
|
||||
|
||||
qmp_eject(true, device, false, NULL, true, force, &err);
|
||||
qmp_eject(device, NULL, true, force, &err);
|
||||
hmp_handle_error(mon, err);
|
||||
}
|
||||
|
||||
@ -635,18 +626,18 @@ static void print_block_info(Monitor *mon, BlockInfo *info,
|
||||
{
|
||||
ImageInfo *image_info;
|
||||
|
||||
assert(!info || !info->has_inserted || info->inserted == inserted);
|
||||
assert(!info || !info->inserted || info->inserted == inserted);
|
||||
|
||||
if (info && *info->device) {
|
||||
monitor_puts(mon, info->device);
|
||||
if (inserted && inserted->has_node_name) {
|
||||
if (inserted && inserted->node_name) {
|
||||
monitor_printf(mon, " (%s)", inserted->node_name);
|
||||
}
|
||||
} else {
|
||||
assert(info || inserted);
|
||||
monitor_puts(mon,
|
||||
inserted && inserted->has_node_name ? inserted->node_name
|
||||
: info && info->has_qdev ? info->qdev
|
||||
inserted && inserted->node_name ? inserted->node_name
|
||||
: info && info->qdev ? info->qdev
|
||||
: "<anonymous>");
|
||||
}
|
||||
|
||||
@ -661,7 +652,7 @@ static void print_block_info(Monitor *mon, BlockInfo *info,
|
||||
}
|
||||
|
||||
if (info) {
|
||||
if (info->has_qdev) {
|
||||
if (info->qdev) {
|
||||
monitor_printf(mon, " Attached to: %s\n", info->qdev);
|
||||
}
|
||||
if (info->has_io_status && info->io_status != BLOCK_DEVICE_IO_STATUS_OK) {
|
||||
@ -686,7 +677,7 @@ static void print_block_info(Monitor *mon, BlockInfo *info,
|
||||
inserted->cache->direct ? ", direct" : "",
|
||||
inserted->cache->no_flush ? ", ignore flushes" : "");
|
||||
|
||||
if (inserted->has_backing_file) {
|
||||
if (inserted->backing_file) {
|
||||
monitor_printf(mon,
|
||||
" Backing file: %s "
|
||||
"(chain depth: %" PRId64 ")\n",
|
||||
@ -735,7 +726,7 @@ static void print_block_info(Monitor *mon, BlockInfo *info,
|
||||
image_info = inserted->image;
|
||||
while (1) {
|
||||
bdrv_image_info_dump(image_info);
|
||||
if (image_info->has_backing_image) {
|
||||
if (image_info->backing_image) {
|
||||
image_info = image_info->backing_image;
|
||||
} else {
|
||||
break;
|
||||
@ -769,8 +760,7 @@ void hmp_info_block(Monitor *mon, const QDict *qdict)
|
||||
monitor_printf(mon, "\n");
|
||||
}
|
||||
|
||||
print_block_info(mon, info->value, info->value->has_inserted
|
||||
? info->value->inserted : NULL,
|
||||
print_block_info(mon, info->value, info->value->inserted,
|
||||
verbose);
|
||||
printed = true;
|
||||
}
|
||||
@ -784,7 +774,7 @@ void hmp_info_block(Monitor *mon, const QDict *qdict)
|
||||
/* Print node information */
|
||||
blockdev_list = qmp_query_named_block_nodes(false, false, NULL);
|
||||
for (blockdev = blockdev_list; blockdev; blockdev = blockdev->next) {
|
||||
assert(blockdev->value->has_node_name);
|
||||
assert(blockdev->value->node_name);
|
||||
if (device && strcmp(device, blockdev->value->node_name)) {
|
||||
continue;
|
||||
}
|
||||
@ -805,7 +795,7 @@ void hmp_info_blockstats(Monitor *mon, const QDict *qdict)
|
||||
stats_list = qmp_query_blockstats(false, false, NULL);
|
||||
|
||||
for (stats = stats_list; stats; stats = stats->next) {
|
||||
if (!stats->value->has_device) {
|
||||
if (!stats->value->device) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -646,7 +646,7 @@ static int coroutine_fn parallels_co_create_opts(BlockDriver *drv,
|
||||
}
|
||||
|
||||
/* Create and open the file (protocol layer) */
|
||||
ret = bdrv_create_file(filename, opts, errp);
|
||||
ret = bdrv_co_create_file(filename, opts, errp);
|
||||
if (ret < 0) {
|
||||
goto done;
|
||||
}
|
||||
|
@ -116,8 +116,8 @@ static int do_open_tray(const char *blk_name, const char *qdev_id,
|
||||
return 0;
|
||||
}
|
||||
|
||||
void qmp_blockdev_open_tray(bool has_device, const char *device,
|
||||
bool has_id, const char *id,
|
||||
void qmp_blockdev_open_tray(const char *device,
|
||||
const char *id,
|
||||
bool has_force, bool force,
|
||||
Error **errp)
|
||||
{
|
||||
@ -127,9 +127,7 @@ void qmp_blockdev_open_tray(bool has_device, const char *device,
|
||||
if (!has_force) {
|
||||
force = false;
|
||||
}
|
||||
rc = do_open_tray(has_device ? device : NULL,
|
||||
has_id ? id : NULL,
|
||||
force, &local_err);
|
||||
rc = do_open_tray(device, id, force, &local_err);
|
||||
if (rc && rc != -ENOSYS && rc != -EINPROGRESS) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
@ -137,16 +135,13 @@ void qmp_blockdev_open_tray(bool has_device, const char *device,
|
||||
error_free(local_err);
|
||||
}
|
||||
|
||||
void qmp_blockdev_close_tray(bool has_device, const char *device,
|
||||
bool has_id, const char *id,
|
||||
void qmp_blockdev_close_tray(const char *device,
|
||||
const char *id,
|
||||
Error **errp)
|
||||
{
|
||||
BlockBackend *blk;
|
||||
Error *local_err = NULL;
|
||||
|
||||
device = has_device ? device : NULL;
|
||||
id = has_id ? id : NULL;
|
||||
|
||||
blk = qmp_get_blk(device, id, errp);
|
||||
if (!blk) {
|
||||
return;
|
||||
@ -173,17 +168,14 @@ void qmp_blockdev_close_tray(bool has_device, const char *device,
|
||||
}
|
||||
}
|
||||
|
||||
static void blockdev_remove_medium(bool has_device, const char *device,
|
||||
bool has_id, const char *id, Error **errp)
|
||||
static void blockdev_remove_medium(const char *device, const char *id,
|
||||
Error **errp)
|
||||
{
|
||||
BlockBackend *blk;
|
||||
BlockDriverState *bs;
|
||||
AioContext *aio_context;
|
||||
bool has_attached_device;
|
||||
|
||||
device = has_device ? device : NULL;
|
||||
id = has_id ? id : NULL;
|
||||
|
||||
blk = qmp_get_blk(device, id, errp);
|
||||
if (!blk) {
|
||||
return;
|
||||
@ -232,7 +224,7 @@ out:
|
||||
|
||||
void qmp_blockdev_remove_medium(const char *id, Error **errp)
|
||||
{
|
||||
blockdev_remove_medium(false, NULL, true, id, errp);
|
||||
blockdev_remove_medium(NULL, id, errp);
|
||||
}
|
||||
|
||||
static void qmp_blockdev_insert_anon_medium(BlockBackend *blk,
|
||||
@ -280,16 +272,13 @@ static void qmp_blockdev_insert_anon_medium(BlockBackend *blk,
|
||||
}
|
||||
}
|
||||
|
||||
static void blockdev_insert_medium(bool has_device, const char *device,
|
||||
bool has_id, const char *id,
|
||||
static void blockdev_insert_medium(const char *device, const char *id,
|
||||
const char *node_name, Error **errp)
|
||||
{
|
||||
BlockBackend *blk;
|
||||
BlockDriverState *bs;
|
||||
|
||||
blk = qmp_get_blk(has_device ? device : NULL,
|
||||
has_id ? id : NULL,
|
||||
errp);
|
||||
blk = qmp_get_blk(device, id, errp);
|
||||
if (!blk) {
|
||||
return;
|
||||
}
|
||||
@ -311,13 +300,13 @@ static void blockdev_insert_medium(bool has_device, const char *device,
|
||||
void qmp_blockdev_insert_medium(const char *id, const char *node_name,
|
||||
Error **errp)
|
||||
{
|
||||
blockdev_insert_medium(false, NULL, true, id, node_name, errp);
|
||||
blockdev_insert_medium(NULL, id, node_name, errp);
|
||||
}
|
||||
|
||||
void qmp_blockdev_change_medium(bool has_device, const char *device,
|
||||
bool has_id, const char *id,
|
||||
void qmp_blockdev_change_medium(const char *device,
|
||||
const char *id,
|
||||
const char *filename,
|
||||
bool has_format, const char *format,
|
||||
const char *format,
|
||||
bool has_force, bool force,
|
||||
bool has_read_only,
|
||||
BlockdevChangeReadOnlyMode read_only,
|
||||
@ -331,9 +320,7 @@ void qmp_blockdev_change_medium(bool has_device, const char *device,
|
||||
QDict *options = NULL;
|
||||
Error *err = NULL;
|
||||
|
||||
blk = qmp_get_blk(has_device ? device : NULL,
|
||||
has_id ? id : NULL,
|
||||
errp);
|
||||
blk = qmp_get_blk(device, id, errp);
|
||||
if (!blk) {
|
||||
goto fail;
|
||||
}
|
||||
@ -370,7 +357,7 @@ void qmp_blockdev_change_medium(bool has_device, const char *device,
|
||||
detect_zeroes = blk_get_detect_zeroes_from_root_state(blk);
|
||||
qdict_put_str(options, "detect-zeroes", detect_zeroes ? "on" : "off");
|
||||
|
||||
if (has_format) {
|
||||
if (format) {
|
||||
qdict_put_str(options, "driver", format);
|
||||
}
|
||||
|
||||
@ -379,9 +366,7 @@ void qmp_blockdev_change_medium(bool has_device, const char *device,
|
||||
goto fail;
|
||||
}
|
||||
|
||||
rc = do_open_tray(has_device ? device : NULL,
|
||||
has_id ? id : NULL,
|
||||
force, &err);
|
||||
rc = do_open_tray(device, id, force, &err);
|
||||
if (rc && rc != -ENOSYS) {
|
||||
error_propagate(errp, err);
|
||||
goto fail;
|
||||
@ -389,7 +374,7 @@ void qmp_blockdev_change_medium(bool has_device, const char *device,
|
||||
error_free(err);
|
||||
err = NULL;
|
||||
|
||||
blockdev_remove_medium(has_device, device, has_id, id, &err);
|
||||
blockdev_remove_medium(device, id, &err);
|
||||
if (err) {
|
||||
error_propagate(errp, err);
|
||||
goto fail;
|
||||
@ -401,7 +386,7 @@ void qmp_blockdev_change_medium(bool has_device, const char *device,
|
||||
goto fail;
|
||||
}
|
||||
|
||||
qmp_blockdev_close_tray(has_device, device, has_id, id, errp);
|
||||
qmp_blockdev_close_tray(device, id, errp);
|
||||
|
||||
fail:
|
||||
/* If the medium has been inserted, the device has its own reference, so
|
||||
@ -410,8 +395,7 @@ fail:
|
||||
bdrv_unref(medium_bs);
|
||||
}
|
||||
|
||||
void qmp_eject(bool has_device, const char *device,
|
||||
bool has_id, const char *id,
|
||||
void qmp_eject(const char *device, const char *id,
|
||||
bool has_force, bool force, Error **errp)
|
||||
{
|
||||
Error *local_err = NULL;
|
||||
@ -421,16 +405,14 @@ void qmp_eject(bool has_device, const char *device,
|
||||
force = false;
|
||||
}
|
||||
|
||||
rc = do_open_tray(has_device ? device : NULL,
|
||||
has_id ? id : NULL,
|
||||
force, &local_err);
|
||||
rc = do_open_tray(device, id, force, &local_err);
|
||||
if (rc && rc != -ENOSYS) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
error_free(local_err);
|
||||
|
||||
blockdev_remove_medium(has_device, device, has_id, id, errp);
|
||||
blockdev_remove_medium(device, id, errp);
|
||||
}
|
||||
|
||||
/* throttling disk I/O limits */
|
||||
@ -441,9 +423,7 @@ void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp)
|
||||
BlockBackend *blk;
|
||||
AioContext *aio_context;
|
||||
|
||||
blk = qmp_get_blk(arg->has_device ? arg->device : NULL,
|
||||
arg->has_id ? arg->id : NULL,
|
||||
errp);
|
||||
blk = qmp_get_blk(arg->device, arg->id, errp);
|
||||
if (!blk) {
|
||||
return;
|
||||
}
|
||||
@ -516,11 +496,8 @@ void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp)
|
||||
/* Enable I/O limits if they're not enabled yet, otherwise
|
||||
* just update the throttling group. */
|
||||
if (!blk_get_public(blk)->throttle_group_member.throttle_state) {
|
||||
blk_io_limits_enable(blk,
|
||||
arg->has_group ? arg->group :
|
||||
arg->has_device ? arg->device :
|
||||
arg->id);
|
||||
} else if (arg->has_group) {
|
||||
blk_io_limits_enable(blk, arg->group ?: arg->device ?: arg->id);
|
||||
} else if (arg->group) {
|
||||
blk_io_limits_update_group(blk, arg->group);
|
||||
}
|
||||
/* Set the new throttling configuration */
|
||||
|
62
block/qapi.c
62
block/qapi.c
@ -71,13 +71,11 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
|
||||
};
|
||||
|
||||
if (bs->node_name[0]) {
|
||||
info->has_node_name = true;
|
||||
info->node_name = g_strdup(bs->node_name);
|
||||
}
|
||||
|
||||
backing = bdrv_cow_bs(bs);
|
||||
if (backing) {
|
||||
info->has_backing_file = true;
|
||||
info->backing_file = g_strdup(backing->filename);
|
||||
}
|
||||
|
||||
@ -139,7 +137,6 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
|
||||
info->has_iops_size = cfg.op_size;
|
||||
info->iops_size = cfg.op_size;
|
||||
|
||||
info->has_group = true;
|
||||
info->group =
|
||||
g_strdup(throttle_group_get_name(&blkp->throttle_group_member));
|
||||
}
|
||||
@ -170,7 +167,6 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
|
||||
*/
|
||||
info->backing_file_depth++;
|
||||
bs0 = bdrv_filter_or_cow_bs(bs0);
|
||||
(*p_image_info)->has_backing_image = true;
|
||||
p_image_info = &((*p_image_info)->backing_image);
|
||||
} else {
|
||||
break;
|
||||
@ -301,26 +297,21 @@ void bdrv_query_image_info(BlockDriverState *bs,
|
||||
qapi_free_ImageInfo(info);
|
||||
goto out;
|
||||
}
|
||||
info->has_format_specific = info->format_specific != NULL;
|
||||
|
||||
backing_filename = bs->backing_file;
|
||||
if (backing_filename[0] != '\0') {
|
||||
char *backing_filename2;
|
||||
|
||||
info->backing_filename = g_strdup(backing_filename);
|
||||
info->has_backing_filename = true;
|
||||
backing_filename2 = bdrv_get_full_backing_filename(bs, NULL);
|
||||
|
||||
/* Always report the full_backing_filename if present, even if it's the
|
||||
* same as backing_filename. That they are same is useful info. */
|
||||
if (backing_filename2) {
|
||||
info->full_backing_filename = g_strdup(backing_filename2);
|
||||
info->has_full_backing_filename = true;
|
||||
}
|
||||
|
||||
if (bs->backing_format[0]) {
|
||||
info->backing_filename_format = g_strdup(bs->backing_format);
|
||||
info->has_backing_filename_format = true;
|
||||
}
|
||||
g_free(backing_filename2);
|
||||
}
|
||||
@ -367,7 +358,6 @@ static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info,
|
||||
|
||||
qdev = blk_get_attached_dev_id(blk);
|
||||
if (qdev && *qdev) {
|
||||
info->has_qdev = true;
|
||||
info->qdev = qdev;
|
||||
} else {
|
||||
g_free(qdev);
|
||||
@ -384,7 +374,6 @@ static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info,
|
||||
}
|
||||
|
||||
if (bs && bs->drv) {
|
||||
info->has_inserted = true;
|
||||
info->inserted = bdrv_block_device_info(blk, bs, false, errp);
|
||||
if (info->inserted == NULL) {
|
||||
goto err;
|
||||
@ -411,23 +400,26 @@ static uint64List *uint64_list(uint64_t *list, int size)
|
||||
return out_list;
|
||||
}
|
||||
|
||||
static void bdrv_latency_histogram_stats(BlockLatencyHistogram *hist,
|
||||
bool *not_null,
|
||||
BlockLatencyHistogramInfo **info)
|
||||
static BlockLatencyHistogramInfo *
|
||||
bdrv_latency_histogram_stats(BlockLatencyHistogram *hist)
|
||||
{
|
||||
*not_null = hist->bins != NULL;
|
||||
if (*not_null) {
|
||||
*info = g_new0(BlockLatencyHistogramInfo, 1);
|
||||
BlockLatencyHistogramInfo *info;
|
||||
|
||||
(*info)->boundaries = uint64_list(hist->boundaries, hist->nbins - 1);
|
||||
(*info)->bins = uint64_list(hist->bins, hist->nbins);
|
||||
if (!hist->bins) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
info = g_new0(BlockLatencyHistogramInfo, 1);
|
||||
info->boundaries = uint64_list(hist->boundaries, hist->nbins - 1);
|
||||
info->bins = uint64_list(hist->bins, hist->nbins);
|
||||
return info;
|
||||
}
|
||||
|
||||
static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk)
|
||||
{
|
||||
BlockAcctStats *stats = blk_get_stats(blk);
|
||||
BlockAcctTimedStats *ts = NULL;
|
||||
BlockLatencyHistogram *hgram;
|
||||
|
||||
ds->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ];
|
||||
ds->wr_bytes = stats->nr_bytes[BLOCK_ACCT_WRITE];
|
||||
@ -493,15 +485,13 @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk)
|
||||
QAPI_LIST_PREPEND(ds->timed_stats, dev_stats);
|
||||
}
|
||||
|
||||
bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_READ],
|
||||
&ds->has_rd_latency_histogram,
|
||||
&ds->rd_latency_histogram);
|
||||
bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_WRITE],
|
||||
&ds->has_wr_latency_histogram,
|
||||
&ds->wr_latency_histogram);
|
||||
bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_FLUSH],
|
||||
&ds->has_flush_latency_histogram,
|
||||
&ds->flush_latency_histogram);
|
||||
hgram = stats->latency_histogram;
|
||||
ds->rd_latency_histogram
|
||||
= bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_READ]);
|
||||
ds->wr_latency_histogram
|
||||
= bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_WRITE]);
|
||||
ds->flush_latency_histogram
|
||||
= bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_FLUSH]);
|
||||
}
|
||||
|
||||
static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs,
|
||||
@ -526,16 +516,12 @@ static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs,
|
||||
}
|
||||
|
||||
if (bdrv_get_node_name(bs)[0]) {
|
||||
s->has_node_name = true;
|
||||
s->node_name = g_strdup(bdrv_get_node_name(bs));
|
||||
}
|
||||
|
||||
s->stats->wr_highest_offset = stat64_get(&bs->wr_highest_offset);
|
||||
|
||||
s->driver_specific = bdrv_get_specific_stats(bs);
|
||||
if (s->driver_specific) {
|
||||
s->has_driver_specific = true;
|
||||
}
|
||||
|
||||
parent_child = bdrv_primary_child(bs);
|
||||
if (!parent_child ||
|
||||
@ -564,7 +550,6 @@ static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs,
|
||||
}
|
||||
}
|
||||
if (parent_child) {
|
||||
s->has_parent = true;
|
||||
s->parent = bdrv_query_bds_stats(parent_child->bs, blk_level);
|
||||
}
|
||||
|
||||
@ -575,7 +560,6 @@ static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs,
|
||||
* compatibility to when we put bs0->backing here, which might
|
||||
* be either)
|
||||
*/
|
||||
s->has_backing = true;
|
||||
s->backing = bdrv_query_bds_stats(filter_or_cow_bs, blk_level);
|
||||
}
|
||||
|
||||
@ -640,12 +624,10 @@ BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
|
||||
|
||||
aio_context_acquire(ctx);
|
||||
s = bdrv_query_bds_stats(blk_bs(blk), true);
|
||||
s->has_device = true;
|
||||
s->device = g_strdup(blk_name(blk));
|
||||
|
||||
qdev = blk_get_attached_dev_id(blk);
|
||||
if (qdev && *qdev) {
|
||||
s->has_qdev = true;
|
||||
s->qdev = qdev;
|
||||
} else {
|
||||
g_free(qdev);
|
||||
@ -822,16 +804,16 @@ void bdrv_image_info_dump(ImageInfo *info)
|
||||
qemu_printf("cleanly shut down: no\n");
|
||||
}
|
||||
|
||||
if (info->has_backing_filename) {
|
||||
if (info->backing_filename) {
|
||||
qemu_printf("backing file: %s", info->backing_filename);
|
||||
if (!info->has_full_backing_filename) {
|
||||
if (!info->full_backing_filename) {
|
||||
qemu_printf(" (cannot determine actual path)");
|
||||
} else if (strcmp(info->backing_filename,
|
||||
info->full_backing_filename) != 0) {
|
||||
qemu_printf(" (actual path: %s)", info->full_backing_filename);
|
||||
}
|
||||
qemu_printf("\n");
|
||||
if (info->has_backing_filename_format) {
|
||||
if (info->backing_filename_format) {
|
||||
qemu_printf("backing file format: %s\n",
|
||||
info->backing_filename_format);
|
||||
}
|
||||
@ -865,7 +847,7 @@ void bdrv_image_info_dump(ImageInfo *info)
|
||||
}
|
||||
}
|
||||
|
||||
if (info->has_format_specific) {
|
||||
if (info->format_specific) {
|
||||
qemu_printf("Format specific information:\n");
|
||||
bdrv_image_info_specific_dump(info->format_specific);
|
||||
}
|
||||
|
12
block/qcow.c
12
block/qcow.c
@ -825,7 +825,7 @@ static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (qcow_opts->has_encrypt &&
|
||||
if (qcow_opts->encrypt &&
|
||||
qcow_opts->encrypt->format != Q_CRYPTO_BLOCK_FORMAT_QCOW)
|
||||
{
|
||||
error_setg(errp, "Unsupported encryption format");
|
||||
@ -853,7 +853,7 @@ static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts,
|
||||
header.size = cpu_to_be64(total_size);
|
||||
header_size = sizeof(header);
|
||||
backing_filename_len = 0;
|
||||
if (qcow_opts->has_backing_file) {
|
||||
if (qcow_opts->backing_file) {
|
||||
if (strcmp(qcow_opts->backing_file, "fat:")) {
|
||||
header.backing_file_offset = cpu_to_be64(header_size);
|
||||
backing_filename_len = strlen(qcow_opts->backing_file);
|
||||
@ -861,7 +861,7 @@ static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts,
|
||||
header_size += backing_filename_len;
|
||||
} else {
|
||||
/* special backing file for vvfat */
|
||||
qcow_opts->has_backing_file = false;
|
||||
qcow_opts->backing_file = NULL;
|
||||
}
|
||||
header.cluster_bits = 9; /* 512 byte cluster to avoid copying
|
||||
unmodified sectors */
|
||||
@ -876,7 +876,7 @@ static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts,
|
||||
|
||||
header.l1_table_offset = cpu_to_be64(header_size);
|
||||
|
||||
if (qcow_opts->has_encrypt) {
|
||||
if (qcow_opts->encrypt) {
|
||||
header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
|
||||
|
||||
crypto = qcrypto_block_create(qcow_opts->encrypt, "encrypt.",
|
||||
@ -895,7 +895,7 @@ static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts,
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (qcow_opts->has_backing_file) {
|
||||
if (qcow_opts->backing_file) {
|
||||
ret = blk_co_pwrite(qcow_blk, sizeof(header), backing_filename_len,
|
||||
qcow_opts->backing_file, 0);
|
||||
if (ret < 0) {
|
||||
@ -973,7 +973,7 @@ static int coroutine_fn qcow_co_create_opts(BlockDriver *drv,
|
||||
}
|
||||
|
||||
/* Create and open the file (protocol layer) */
|
||||
ret = bdrv_create_file(filename, opts, errp);
|
||||
ret = bdrv_co_create_file(filename, opts, errp);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
|
@ -3508,7 +3508,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
|
||||
if (!qcow2_opts->has_preallocation) {
|
||||
qcow2_opts->preallocation = PREALLOC_MODE_OFF;
|
||||
}
|
||||
if (qcow2_opts->has_backing_file &&
|
||||
if (qcow2_opts->backing_file &&
|
||||
qcow2_opts->preallocation != PREALLOC_MODE_OFF &&
|
||||
!qcow2_opts->extended_l2)
|
||||
{
|
||||
@ -3517,7 +3517,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
if (qcow2_opts->has_backing_fmt && !qcow2_opts->has_backing_file) {
|
||||
if (qcow2_opts->has_backing_fmt && !qcow2_opts->backing_file) {
|
||||
error_setg(errp, "Backing format cannot be used without backing file");
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
@ -3558,7 +3558,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
if (qcow2_opts->data_file_raw && qcow2_opts->has_backing_file) {
|
||||
if (qcow2_opts->data_file_raw && qcow2_opts->backing_file) {
|
||||
error_setg(errp, "Backing file and data-file-raw cannot be used at "
|
||||
"the same time");
|
||||
ret = -EINVAL;
|
||||
@ -3584,7 +3584,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
|
||||
* backing file when specifying data_file_raw is an error
|
||||
* anyway.
|
||||
*/
|
||||
assert(!qcow2_opts->has_backing_file);
|
||||
assert(!qcow2_opts->backing_file);
|
||||
}
|
||||
|
||||
if (qcow2_opts->data_file) {
|
||||
@ -3752,7 +3752,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
|
||||
}
|
||||
|
||||
/* Want a backing file? There you go. */
|
||||
if (qcow2_opts->has_backing_file) {
|
||||
if (qcow2_opts->backing_file) {
|
||||
const char *backing_format = NULL;
|
||||
|
||||
if (qcow2_opts->has_backing_fmt) {
|
||||
@ -3770,7 +3770,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
|
||||
}
|
||||
|
||||
/* Want encryption? There you go. */
|
||||
if (qcow2_opts->has_encrypt) {
|
||||
if (qcow2_opts->encrypt) {
|
||||
ret = qcow2_set_up_encryption(blk_bs(blk), qcow2_opts->encrypt, errp);
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
@ -3871,7 +3871,7 @@ static int coroutine_fn qcow2_co_create_opts(BlockDriver *drv,
|
||||
}
|
||||
|
||||
/* Create and open the file (protocol layer) */
|
||||
ret = bdrv_create_file(filename, opts, errp);
|
||||
ret = bdrv_co_create_file(filename, opts, errp);
|
||||
if (ret < 0) {
|
||||
goto finish;
|
||||
}
|
||||
@ -3886,7 +3886,7 @@ static int coroutine_fn qcow2_co_create_opts(BlockDriver *drv,
|
||||
/* Create and open an external data file (protocol layer) */
|
||||
val = qdict_get_try_str(qdict, BLOCK_OPT_DATA_FILE);
|
||||
if (val) {
|
||||
ret = bdrv_create_file(val, opts, errp);
|
||||
ret = bdrv_co_create_file(val, opts, errp);
|
||||
if (ret < 0) {
|
||||
goto finish;
|
||||
}
|
||||
@ -5195,7 +5195,6 @@ static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs,
|
||||
.refcount_bits = s->refcount_bits,
|
||||
.has_bitmaps = !!bitmaps,
|
||||
.bitmaps = bitmaps,
|
||||
.has_data_file = !!s->image_data_file,
|
||||
.data_file = g_strdup(s->image_data_file),
|
||||
.has_data_file_raw = has_data_file(bs),
|
||||
.data_file_raw = data_file_is_raw(bs),
|
||||
@ -5226,7 +5225,6 @@ static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs,
|
||||
memset(&encrypt_info->u, 0, sizeof(encrypt_info->u));
|
||||
qapi_free_QCryptoBlockInfo(encrypt_info);
|
||||
|
||||
spec_info->u.qcow2.data->has_encrypt = true;
|
||||
spec_info->u.qcow2.data->encrypt = qencrypt;
|
||||
}
|
||||
|
||||
@ -5846,7 +5844,7 @@ static int coroutine_fn qcow2_co_amend(BlockDriverState *bs,
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int ret = 0;
|
||||
|
||||
if (qopts->has_encrypt) {
|
||||
if (qopts->encrypt) {
|
||||
if (!s->crypto) {
|
||||
error_setg(errp, "image is not encrypted, can't amend");
|
||||
return -EOPNOTSUPP;
|
||||
@ -5911,7 +5909,7 @@ void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
|
||||
|
||||
node_name = bdrv_get_node_name(bs);
|
||||
qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs),
|
||||
*node_name != '\0', node_name,
|
||||
*node_name ? node_name : NULL,
|
||||
message, offset >= 0, offset,
|
||||
size >= 0, size,
|
||||
fatal);
|
||||
|
30
block/qed.c
30
block/qed.c
@ -262,7 +262,7 @@ static bool coroutine_fn qed_plug_allocating_write_reqs(BDRVQEDState *s)
|
||||
assert(!s->allocating_write_reqs_plugged);
|
||||
if (s->allocating_acb != NULL) {
|
||||
/* Another allocating write came concurrently. This cannot happen
|
||||
* from bdrv_qed_co_drain_begin, but it can happen when the timer runs.
|
||||
* from bdrv_qed_drain_begin, but it can happen when the timer runs.
|
||||
*/
|
||||
qemu_co_mutex_unlock(&s->table_lock);
|
||||
return false;
|
||||
@ -282,9 +282,8 @@ static void coroutine_fn qed_unplug_allocating_write_reqs(BDRVQEDState *s)
|
||||
qemu_co_mutex_unlock(&s->table_lock);
|
||||
}
|
||||
|
||||
static void coroutine_fn qed_need_check_timer_entry(void *opaque)
|
||||
static void coroutine_fn qed_need_check_timer(BDRVQEDState *s)
|
||||
{
|
||||
BDRVQEDState *s = opaque;
|
||||
int ret;
|
||||
|
||||
trace_qed_need_check_timer_cb(s);
|
||||
@ -310,9 +309,20 @@ static void coroutine_fn qed_need_check_timer_entry(void *opaque)
|
||||
(void) ret;
|
||||
}
|
||||
|
||||
static void coroutine_fn qed_need_check_timer_entry(void *opaque)
|
||||
{
|
||||
BDRVQEDState *s = opaque;
|
||||
|
||||
qed_need_check_timer(opaque);
|
||||
bdrv_dec_in_flight(s->bs);
|
||||
}
|
||||
|
||||
static void qed_need_check_timer_cb(void *opaque)
|
||||
{
|
||||
BDRVQEDState *s = opaque;
|
||||
Coroutine *co = qemu_coroutine_create(qed_need_check_timer_entry, opaque);
|
||||
|
||||
bdrv_inc_in_flight(s->bs);
|
||||
qemu_coroutine_enter(co);
|
||||
}
|
||||
|
||||
@ -355,7 +365,7 @@ static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
|
||||
}
|
||||
}
|
||||
|
||||
static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs)
|
||||
static void bdrv_qed_drain_begin(BlockDriverState *bs)
|
||||
{
|
||||
BDRVQEDState *s = bs->opaque;
|
||||
|
||||
@ -363,8 +373,12 @@ static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs)
|
||||
* header is flushed.
|
||||
*/
|
||||
if (s->need_check_timer && timer_pending(s->need_check_timer)) {
|
||||
Coroutine *co;
|
||||
|
||||
qed_cancel_need_check_timer(s);
|
||||
qed_need_check_timer_entry(s);
|
||||
co = qemu_coroutine_create(qed_need_check_timer_entry, s);
|
||||
bdrv_inc_in_flight(bs);
|
||||
aio_co_enter(bdrv_get_aio_context(bs), co);
|
||||
}
|
||||
}
|
||||
|
||||
@ -698,7 +712,7 @@ static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (qed_opts->has_backing_file) {
|
||||
if (qed_opts->backing_file) {
|
||||
header.features |= QED_F_BACKING_FILE;
|
||||
header.backing_filename_offset = sizeof(le_header);
|
||||
header.backing_filename_size = strlen(qed_opts->backing_file);
|
||||
@ -764,7 +778,7 @@ static int coroutine_fn bdrv_qed_co_create_opts(BlockDriver *drv,
|
||||
}
|
||||
|
||||
/* Create and open the file (protocol layer) */
|
||||
ret = bdrv_create_file(filename, opts, errp);
|
||||
ret = bdrv_co_create_file(filename, opts, errp);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
@ -1647,7 +1661,7 @@ static BlockDriver bdrv_qed = {
|
||||
.bdrv_co_check = bdrv_qed_co_check,
|
||||
.bdrv_detach_aio_context = bdrv_qed_detach_aio_context,
|
||||
.bdrv_attach_aio_context = bdrv_qed_attach_aio_context,
|
||||
.bdrv_co_drain_begin = bdrv_qed_co_drain_begin,
|
||||
.bdrv_drain_begin = bdrv_qed_drain_begin,
|
||||
};
|
||||
|
||||
static void bdrv_qed_init(void)
|
||||
|
@ -202,7 +202,7 @@ static void quorum_report_bad(QuorumOpType type, uint64_t offset,
|
||||
msg = strerror(-ret);
|
||||
}
|
||||
|
||||
qapi_event_send_quorum_report_bad(type, !!msg, msg, node_name, start_sector,
|
||||
qapi_event_send_quorum_report_bad(type, msg, node_name, start_sector,
|
||||
end_sector - start_sector);
|
||||
}
|
||||
|
||||
|
@ -433,7 +433,7 @@ static int coroutine_fn raw_co_create_opts(BlockDriver *drv,
|
||||
QemuOpts *opts,
|
||||
Error **errp)
|
||||
{
|
||||
return bdrv_create_file(filename, opts, errp);
|
||||
return bdrv_co_create_file(filename, opts, errp);
|
||||
}
|
||||
|
||||
static int raw_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
|
17
block/rbd.c
17
block/rbd.c
@ -536,13 +536,13 @@ static int qemu_rbd_do_create(BlockdevCreateOptions *options,
|
||||
int ret;
|
||||
|
||||
assert(options->driver == BLOCKDEV_DRIVER_RBD);
|
||||
if (opts->location->has_snapshot) {
|
||||
if (opts->location->snapshot) {
|
||||
error_setg(errp, "Can't use snapshot name for image creation");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
#ifndef LIBRBD_SUPPORTS_ENCRYPTION
|
||||
if (opts->has_encrypt) {
|
||||
if (opts->encrypt) {
|
||||
error_setg(errp, "RBD library does not support image encryption");
|
||||
return -ENOTSUP;
|
||||
}
|
||||
@ -574,7 +574,7 @@ static int qemu_rbd_do_create(BlockdevCreateOptions *options,
|
||||
}
|
||||
|
||||
#ifdef LIBRBD_SUPPORTS_ENCRYPTION
|
||||
if (opts->has_encrypt) {
|
||||
if (opts->encrypt) {
|
||||
rbd_image_t image;
|
||||
|
||||
ret = rbd_open(io_ctx, opts->location->image, &image, NULL);
|
||||
@ -686,7 +686,6 @@ static int coroutine_fn qemu_rbd_co_create_opts(BlockDriver *drv,
|
||||
goto exit;
|
||||
}
|
||||
rbd_opts->encrypt = encrypt;
|
||||
rbd_opts->has_encrypt = !!encrypt;
|
||||
|
||||
/*
|
||||
* Caution: while qdict_get_try_str() is fine, getting non-string
|
||||
@ -697,11 +696,8 @@ static int coroutine_fn qemu_rbd_co_create_opts(BlockDriver *drv,
|
||||
loc = rbd_opts->location;
|
||||
loc->pool = g_strdup(qdict_get_try_str(options, "pool"));
|
||||
loc->conf = g_strdup(qdict_get_try_str(options, "conf"));
|
||||
loc->has_conf = !!loc->conf;
|
||||
loc->user = g_strdup(qdict_get_try_str(options, "user"));
|
||||
loc->has_user = !!loc->user;
|
||||
loc->q_namespace = g_strdup(qdict_get_try_str(options, "namespace"));
|
||||
loc->has_q_namespace = !!loc->q_namespace;
|
||||
loc->image = g_strdup(qdict_get_try_str(options, "image"));
|
||||
keypairs = qdict_get_try_str(options, "=keyvalue-pairs");
|
||||
|
||||
@ -767,7 +763,6 @@ static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx,
|
||||
return -EINVAL;
|
||||
}
|
||||
opts->key_secret = g_strdup(secretid);
|
||||
opts->has_key_secret = true;
|
||||
}
|
||||
|
||||
mon_host = qemu_rbd_mon_host(opts, &local_err);
|
||||
@ -785,7 +780,7 @@ static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx,
|
||||
|
||||
/* try default location when conf=NULL, but ignore failure */
|
||||
r = rados_conf_read_file(*cluster, opts->conf);
|
||||
if (opts->has_conf && r < 0) {
|
||||
if (opts->conf && r < 0) {
|
||||
error_setg_errno(errp, -r, "error reading conf file %s", opts->conf);
|
||||
goto failed_shutdown;
|
||||
}
|
||||
@ -833,7 +828,7 @@ static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx,
|
||||
}
|
||||
|
||||
#ifdef HAVE_RBD_NAMESPACE_EXISTS
|
||||
if (opts->has_q_namespace && strlen(opts->q_namespace) > 0) {
|
||||
if (opts->q_namespace && strlen(opts->q_namespace) > 0) {
|
||||
bool exists;
|
||||
|
||||
r = rbd_namespace_exists(*io_ctx, opts->q_namespace, &exists);
|
||||
@ -991,7 +986,7 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
goto failed_open;
|
||||
}
|
||||
|
||||
if (opts->has_encrypt) {
|
||||
if (opts->encrypt) {
|
||||
#ifdef LIBRBD_SUPPORTS_ENCRYPTION
|
||||
r = qemu_rbd_encryption_load(s->image, opts->encrypt, errp);
|
||||
if (r < 0) {
|
||||
|
@ -374,9 +374,6 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable,
|
||||
s->orig_secondary_read_only = bdrv_is_read_only(secondary_disk->bs);
|
||||
}
|
||||
|
||||
bdrv_subtree_drained_begin(hidden_disk->bs);
|
||||
bdrv_subtree_drained_begin(secondary_disk->bs);
|
||||
|
||||
if (s->orig_hidden_read_only) {
|
||||
QDict *opts = qdict_new();
|
||||
qdict_put_bool(opts, BDRV_OPT_READ_ONLY, !writable);
|
||||
@ -401,9 +398,6 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable,
|
||||
aio_context_acquire(ctx);
|
||||
}
|
||||
}
|
||||
|
||||
bdrv_subtree_drained_end(hidden_disk->bs);
|
||||
bdrv_subtree_drained_end(secondary_disk->bs);
|
||||
}
|
||||
|
||||
static void backup_job_cleanup(BlockDriverState *bs)
|
||||
|
@ -643,7 +643,7 @@ static int connect_to_ssh(BDRVSSHState *s, BlockdevOptionsSsh *opts,
|
||||
unsigned int port = 0;
|
||||
int new_sock = -1;
|
||||
|
||||
if (opts->has_user) {
|
||||
if (opts->user) {
|
||||
s->user = g_strdup(opts->user);
|
||||
} else {
|
||||
s->user = g_strdup(g_get_user_name());
|
||||
|
@ -64,13 +64,16 @@ static int stream_prepare(Job *job)
|
||||
bdrv_cor_filter_drop(s->cor_filter_bs);
|
||||
s->cor_filter_bs = NULL;
|
||||
|
||||
bdrv_subtree_drained_begin(s->above_base);
|
||||
/*
|
||||
* bdrv_set_backing_hd() requires that unfiltered_bs is drained. Drain
|
||||
* already here and use bdrv_set_backing_hd_drained() instead because
|
||||
* the polling during drained_begin() might change the graph, and if we do
|
||||
* this only later, we may end up working with the wrong base node (or it
|
||||
* might even have gone away by the time we want to use it).
|
||||
*/
|
||||
bdrv_drained_begin(unfiltered_bs);
|
||||
|
||||
base = bdrv_filter_or_cow_bs(s->above_base);
|
||||
if (base) {
|
||||
bdrv_ref(base);
|
||||
}
|
||||
|
||||
unfiltered_base = bdrv_skip_filters(base);
|
||||
|
||||
if (bdrv_cow_child(unfiltered_bs)) {
|
||||
@ -82,7 +85,13 @@ static int stream_prepare(Job *job)
|
||||
}
|
||||
}
|
||||
|
||||
bdrv_set_backing_hd(unfiltered_bs, base, &local_err);
|
||||
bdrv_set_backing_hd_drained(unfiltered_bs, base, &local_err);
|
||||
|
||||
/*
|
||||
* This call will do I/O, so the graph can change again from here on.
|
||||
* We have already completed the graph change, so we are not in danger
|
||||
* of operating on the wrong node any more if this happens.
|
||||
*/
|
||||
ret = bdrv_change_backing_file(unfiltered_bs, base_id, base_fmt, false);
|
||||
if (local_err) {
|
||||
error_report_err(local_err);
|
||||
@ -92,10 +101,7 @@ static int stream_prepare(Job *job)
|
||||
}
|
||||
|
||||
out:
|
||||
if (base) {
|
||||
bdrv_unref(base);
|
||||
}
|
||||
bdrv_subtree_drained_end(s->above_base);
|
||||
bdrv_drained_end(unfiltered_bs);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -214,7 +214,7 @@ static void throttle_reopen_abort(BDRVReopenState *reopen_state)
|
||||
reopen_state->opaque = NULL;
|
||||
}
|
||||
|
||||
static void coroutine_fn throttle_co_drain_begin(BlockDriverState *bs)
|
||||
static void throttle_drain_begin(BlockDriverState *bs)
|
||||
{
|
||||
ThrottleGroupMember *tgm = bs->opaque;
|
||||
if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) {
|
||||
@ -222,7 +222,7 @@ static void coroutine_fn throttle_co_drain_begin(BlockDriverState *bs)
|
||||
}
|
||||
}
|
||||
|
||||
static void coroutine_fn throttle_co_drain_end(BlockDriverState *bs)
|
||||
static void throttle_drain_end(BlockDriverState *bs)
|
||||
{
|
||||
ThrottleGroupMember *tgm = bs->opaque;
|
||||
assert(tgm->io_limits_disabled);
|
||||
@ -261,8 +261,8 @@ static BlockDriver bdrv_throttle = {
|
||||
.bdrv_reopen_commit = throttle_reopen_commit,
|
||||
.bdrv_reopen_abort = throttle_reopen_abort,
|
||||
|
||||
.bdrv_co_drain_begin = throttle_co_drain_begin,
|
||||
.bdrv_co_drain_end = throttle_co_drain_end,
|
||||
.bdrv_drain_begin = throttle_drain_begin,
|
||||
.bdrv_drain_end = throttle_drain_end,
|
||||
|
||||
.is_filter = true,
|
||||
.strong_runtime_opts = throttle_strong_runtime_opts,
|
||||
|
@ -934,7 +934,7 @@ static int coroutine_fn vdi_co_create_opts(BlockDriver *drv,
|
||||
qdict = qemu_opts_to_qdict_filtered(opts, NULL, &vdi_create_opts, true);
|
||||
|
||||
/* Create and open the file (protocol layer) */
|
||||
ret = bdrv_create_file(filename, opts, errp);
|
||||
ret = bdrv_co_create_file(filename, opts, errp);
|
||||
if (ret < 0) {
|
||||
goto done;
|
||||
}
|
||||
|
@ -2084,7 +2084,7 @@ static int coroutine_fn vhdx_co_create_opts(BlockDriver *drv,
|
||||
}
|
||||
|
||||
/* Create and open the file (protocol layer) */
|
||||
ret = bdrv_create_file(filename, opts, errp);
|
||||
ret = bdrv_co_create_file(filename, opts, errp);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
|
30
block/vmdk.c
30
block/vmdk.c
@ -2285,15 +2285,16 @@ exit:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vmdk_create_extent(const char *filename, int64_t filesize,
|
||||
bool flat, bool compress, bool zeroed_grain,
|
||||
static int coroutine_fn vmdk_create_extent(const char *filename,
|
||||
int64_t filesize, bool flat,
|
||||
bool compress, bool zeroed_grain,
|
||||
BlockBackend **pbb,
|
||||
QemuOpts *opts, Error **errp)
|
||||
{
|
||||
int ret;
|
||||
BlockBackend *blk = NULL;
|
||||
|
||||
ret = bdrv_create_file(filename, opts, errp);
|
||||
ret = bdrv_co_create_file(filename, opts, errp);
|
||||
if (ret < 0) {
|
||||
goto exit;
|
||||
}
|
||||
@ -2366,7 +2367,7 @@ static int filename_decompose(const char *filename, char *path, char *prefix,
|
||||
* non-split format.
|
||||
* idx >= 1: get the n-th extent if in a split subformat
|
||||
*/
|
||||
typedef BlockBackend *(*vmdk_create_extent_fn)(int64_t size,
|
||||
typedef BlockBackend * coroutine_fn (*vmdk_create_extent_fn)(int64_t size,
|
||||
int idx,
|
||||
bool flat,
|
||||
bool split,
|
||||
@ -2616,7 +2617,7 @@ typedef struct {
|
||||
QemuOpts *opts;
|
||||
} VMDKCreateOptsData;
|
||||
|
||||
static BlockBackend *vmdk_co_create_opts_cb(int64_t size, int idx,
|
||||
static BlockBackend * coroutine_fn vmdk_co_create_opts_cb(int64_t size, int idx,
|
||||
bool flat, bool split, bool compress,
|
||||
bool zeroed_grain, void *opaque,
|
||||
Error **errp)
|
||||
@ -2768,10 +2769,11 @@ exit:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static BlockBackend *vmdk_co_create_cb(int64_t size, int idx,
|
||||
bool flat, bool split, bool compress,
|
||||
bool zeroed_grain, void *opaque,
|
||||
Error **errp)
|
||||
static BlockBackend * coroutine_fn vmdk_co_create_cb(int64_t size, int idx,
|
||||
bool flat, bool split,
|
||||
bool compress,
|
||||
bool zeroed_grain,
|
||||
void *opaque, Error **errp)
|
||||
{
|
||||
int ret;
|
||||
BlockDriverState *bs;
|
||||
@ -2821,7 +2823,6 @@ static BlockBackend *vmdk_co_create_cb(int64_t size, int idx,
|
||||
static int coroutine_fn vmdk_co_create(BlockdevCreateOptions *create_options,
|
||||
Error **errp)
|
||||
{
|
||||
int ret;
|
||||
BlockdevCreateOptionsVmdk *opts;
|
||||
|
||||
opts = &create_options->u.vmdk;
|
||||
@ -2829,11 +2830,10 @@ static int coroutine_fn vmdk_co_create(BlockdevCreateOptions *create_options,
|
||||
/* Validate options */
|
||||
if (!QEMU_IS_ALIGNED(opts->size, BDRV_SECTOR_SIZE)) {
|
||||
error_setg(errp, "Image size must be a multiple of 512 bytes");
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = vmdk_co_do_create(opts->size,
|
||||
return vmdk_co_do_create(opts->size,
|
||||
opts->subformat,
|
||||
opts->adapter_type,
|
||||
opts->backing_file,
|
||||
@ -2843,10 +2843,6 @@ static int coroutine_fn vmdk_co_create(BlockdevCreateOptions *create_options,
|
||||
opts->zeroed_grain,
|
||||
vmdk_co_create_cb,
|
||||
opts, errp);
|
||||
return ret;
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void vmdk_close(BlockDriverState *bs)
|
||||
|
@ -1111,7 +1111,7 @@ static int coroutine_fn vpc_co_create_opts(BlockDriver *drv,
|
||||
}
|
||||
|
||||
/* Create and open the file (protocol layer) */
|
||||
ret = bdrv_create_file(filename, opts, errp);
|
||||
ret = bdrv_co_create_file(filename, opts, errp);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
|
@ -173,8 +173,8 @@ void nbd_server_start_options(NbdServerOptions *arg, Error **errp)
|
||||
}
|
||||
|
||||
void qmp_nbd_server_start(SocketAddressLegacy *addr,
|
||||
bool has_tls_creds, const char *tls_creds,
|
||||
bool has_tls_authz, const char *tls_authz,
|
||||
const char *tls_creds,
|
||||
const char *tls_authz,
|
||||
bool has_max_connections, uint32_t max_connections,
|
||||
Error **errp)
|
||||
{
|
||||
@ -200,8 +200,7 @@ void qmp_nbd_server_add(NbdServerAddOptions *arg, Error **errp)
|
||||
* block-export-add would default to the node-name, but we may have to use
|
||||
* the device name as a default here for compatibility.
|
||||
*/
|
||||
if (!arg->has_name) {
|
||||
arg->has_name = true;
|
||||
if (!arg->name) {
|
||||
arg->name = g_strdup(arg->device);
|
||||
}
|
||||
|
||||
@ -215,7 +214,7 @@ void qmp_nbd_server_add(NbdServerAddOptions *arg, Error **errp)
|
||||
};
|
||||
QAPI_CLONE_MEMBERS(BlockExportOptionsNbdBase, &export_opts->u.nbd,
|
||||
qapi_NbdServerAddOptions_base(arg));
|
||||
if (arg->has_bitmap) {
|
||||
if (arg->bitmap) {
|
||||
BlockDirtyBitmapOrStr *el = g_new(BlockDirtyBitmapOrStr, 1);
|
||||
|
||||
*el = (BlockDirtyBitmapOrStr) {
|
||||
|
202
blockdev.c
202
blockdev.c
@ -1048,26 +1048,20 @@ static void blockdev_do_action(TransactionAction *action, Error **errp)
|
||||
|
||||
list.value = action;
|
||||
list.next = NULL;
|
||||
qmp_transaction(&list, false, NULL, errp);
|
||||
qmp_transaction(&list, NULL, errp);
|
||||
}
|
||||
|
||||
void qmp_blockdev_snapshot_sync(bool has_device, const char *device,
|
||||
bool has_node_name, const char *node_name,
|
||||
void qmp_blockdev_snapshot_sync(const char *device, const char *node_name,
|
||||
const char *snapshot_file,
|
||||
bool has_snapshot_node_name,
|
||||
const char *snapshot_node_name,
|
||||
bool has_format, const char *format,
|
||||
const char *format,
|
||||
bool has_mode, NewImageMode mode, Error **errp)
|
||||
{
|
||||
BlockdevSnapshotSync snapshot = {
|
||||
.has_device = has_device,
|
||||
.device = (char *) device,
|
||||
.has_node_name = has_node_name,
|
||||
.node_name = (char *) node_name,
|
||||
.snapshot_file = (char *) snapshot_file,
|
||||
.has_snapshot_node_name = has_snapshot_node_name,
|
||||
.snapshot_node_name = (char *) snapshot_node_name,
|
||||
.has_format = has_format,
|
||||
.format = (char *) format,
|
||||
.has_mode = has_mode,
|
||||
.mode = mode,
|
||||
@ -1109,9 +1103,7 @@ void qmp_blockdev_snapshot_internal_sync(const char *device,
|
||||
}
|
||||
|
||||
SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device,
|
||||
bool has_id,
|
||||
const char *id,
|
||||
bool has_name,
|
||||
const char *name,
|
||||
Error **errp)
|
||||
{
|
||||
@ -1129,14 +1121,6 @@ SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device,
|
||||
aio_context = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(aio_context);
|
||||
|
||||
if (!has_id) {
|
||||
id = NULL;
|
||||
}
|
||||
|
||||
if (!has_name) {
|
||||
name = NULL;
|
||||
}
|
||||
|
||||
if (!id && !name) {
|
||||
error_setg(errp, "Name or id must be provided");
|
||||
goto out_aio_context;
|
||||
@ -1450,8 +1434,8 @@ static void external_snapshot_prepare(BlkActionState *common,
|
||||
case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC:
|
||||
{
|
||||
BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync.data;
|
||||
device = s->has_device ? s->device : NULL;
|
||||
node_name = s->has_node_name ? s->node_name : NULL;
|
||||
device = s->device;
|
||||
node_name = s->node_name;
|
||||
new_image_file = s->snapshot_file;
|
||||
snapshot_ref = NULL;
|
||||
}
|
||||
@ -1495,10 +1479,9 @@ static void external_snapshot_prepare(BlkActionState *common,
|
||||
|
||||
if (action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC) {
|
||||
BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync.data;
|
||||
const char *format = s->has_format ? s->format : "qcow2";
|
||||
const char *format = s->format ?: "qcow2";
|
||||
enum NewImageMode mode;
|
||||
const char *snapshot_node_name =
|
||||
s->has_snapshot_node_name ? s->snapshot_node_name : NULL;
|
||||
const char *snapshot_node_name = s->snapshot_node_name;
|
||||
|
||||
if (node_name && !snapshot_node_name) {
|
||||
error_setg(errp, "New overlay node-name missing");
|
||||
@ -1524,10 +1507,14 @@ static void external_snapshot_prepare(BlkActionState *common,
|
||||
goto out;
|
||||
}
|
||||
bdrv_refresh_filename(state->old_bs);
|
||||
|
||||
aio_context_release(aio_context);
|
||||
bdrv_img_create(new_image_file, format,
|
||||
state->old_bs->filename,
|
||||
state->old_bs->drv->format_name,
|
||||
NULL, size, flags, false, &local_err);
|
||||
aio_context_acquire(aio_context);
|
||||
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
goto out;
|
||||
@ -1686,6 +1673,7 @@ static void drive_backup_prepare(BlkActionState *common, Error **errp)
|
||||
BlockDriverState *source = NULL;
|
||||
AioContext *aio_context;
|
||||
AioContext *old_context;
|
||||
const char *format;
|
||||
QDict *options;
|
||||
Error *local_err = NULL;
|
||||
int flags;
|
||||
@ -1717,9 +1705,9 @@ static void drive_backup_prepare(BlkActionState *common, Error **errp)
|
||||
/* Paired with .clean() */
|
||||
bdrv_drained_begin(bs);
|
||||
|
||||
if (!backup->has_format) {
|
||||
backup->format = backup->mode == NEW_IMAGE_MODE_EXISTING ?
|
||||
NULL : (char *) bs->drv->format_name;
|
||||
format = backup->format;
|
||||
if (!format && backup->mode != NEW_IMAGE_MODE_EXISTING) {
|
||||
format = bs->drv->format_name;
|
||||
}
|
||||
|
||||
/* Early check to avoid creating target */
|
||||
@ -1758,19 +1746,19 @@ static void drive_backup_prepare(BlkActionState *common, Error **errp)
|
||||
}
|
||||
|
||||
if (backup->mode != NEW_IMAGE_MODE_EXISTING) {
|
||||
assert(backup->format);
|
||||
assert(format);
|
||||
if (source) {
|
||||
/* Implicit filters should not appear in the filename */
|
||||
BlockDriverState *explicit_backing =
|
||||
bdrv_skip_implicit_filters(source);
|
||||
|
||||
bdrv_refresh_filename(explicit_backing);
|
||||
bdrv_img_create(backup->target, backup->format,
|
||||
bdrv_img_create(backup->target, format,
|
||||
explicit_backing->filename,
|
||||
explicit_backing->drv->format_name, NULL,
|
||||
size, flags, false, &local_err);
|
||||
} else {
|
||||
bdrv_img_create(backup->target, backup->format, NULL, NULL, NULL,
|
||||
bdrv_img_create(backup->target, format, NULL, NULL, NULL,
|
||||
size, flags, false, &local_err);
|
||||
}
|
||||
}
|
||||
@ -1783,8 +1771,8 @@ static void drive_backup_prepare(BlkActionState *common, Error **errp)
|
||||
options = qdict_new();
|
||||
qdict_put_str(options, "discard", "unmap");
|
||||
qdict_put_str(options, "detect-zeroes", "unmap");
|
||||
if (backup->format) {
|
||||
qdict_put_str(options, "driver", backup->format);
|
||||
if (format) {
|
||||
qdict_put_str(options, "driver", format);
|
||||
}
|
||||
|
||||
target_bs = bdrv_open(backup->target, NULL, options, flags, errp);
|
||||
@ -2305,11 +2293,11 @@ static TransactionProperties *get_transaction_properties(
|
||||
* Always run under BQL.
|
||||
*/
|
||||
void qmp_transaction(TransactionActionList *dev_list,
|
||||
bool has_props,
|
||||
struct TransactionProperties *props,
|
||||
Error **errp)
|
||||
{
|
||||
TransactionActionList *dev_entry = dev_list;
|
||||
bool has_props = !!props;
|
||||
JobTxn *block_job_txn = NULL;
|
||||
BlkActionState *state, *next;
|
||||
Error *local_err = NULL;
|
||||
@ -2411,8 +2399,7 @@ BlockDirtyBitmapSha256 *qmp_x_debug_block_dirty_bitmap_sha256(const char *node,
|
||||
return ret;
|
||||
}
|
||||
|
||||
void coroutine_fn qmp_block_resize(bool has_device, const char *device,
|
||||
bool has_node_name, const char *node_name,
|
||||
void coroutine_fn qmp_block_resize(const char *device, const char *node_name,
|
||||
int64_t size, Error **errp)
|
||||
{
|
||||
Error *local_err = NULL;
|
||||
@ -2420,9 +2407,7 @@ void coroutine_fn qmp_block_resize(bool has_device, const char *device,
|
||||
BlockDriverState *bs;
|
||||
AioContext *old_ctx;
|
||||
|
||||
bs = bdrv_lookup_bs(has_device ? device : NULL,
|
||||
has_node_name ? node_name : NULL,
|
||||
&local_err);
|
||||
bs = bdrv_lookup_bs(device, node_name, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
@ -2457,14 +2442,14 @@ void coroutine_fn qmp_block_resize(bool has_device, const char *device,
|
||||
bdrv_co_unlock(bs);
|
||||
}
|
||||
|
||||
void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
|
||||
bool has_base, const char *base,
|
||||
bool has_base_node, const char *base_node,
|
||||
bool has_backing_file, const char *backing_file,
|
||||
bool has_bottom, const char *bottom,
|
||||
void qmp_block_stream(const char *job_id, const char *device,
|
||||
const char *base,
|
||||
const char *base_node,
|
||||
const char *backing_file,
|
||||
const char *bottom,
|
||||
bool has_speed, int64_t speed,
|
||||
bool has_on_error, BlockdevOnError on_error,
|
||||
bool has_filter_node_name, const char *filter_node_name,
|
||||
const char *filter_node_name,
|
||||
bool has_auto_finalize, bool auto_finalize,
|
||||
bool has_auto_dismiss, bool auto_dismiss,
|
||||
Error **errp)
|
||||
@ -2476,19 +2461,19 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
|
||||
Error *local_err = NULL;
|
||||
int job_flags = JOB_DEFAULT;
|
||||
|
||||
if (has_base && has_base_node) {
|
||||
if (base && base_node) {
|
||||
error_setg(errp, "'base' and 'base-node' cannot be specified "
|
||||
"at the same time");
|
||||
return;
|
||||
}
|
||||
|
||||
if (has_base && has_bottom) {
|
||||
if (base && bottom) {
|
||||
error_setg(errp, "'base' and 'bottom' cannot be specified "
|
||||
"at the same time");
|
||||
return;
|
||||
}
|
||||
|
||||
if (has_bottom && has_base_node) {
|
||||
if (bottom && base_node) {
|
||||
error_setg(errp, "'bottom' and 'base-node' cannot be specified "
|
||||
"at the same time");
|
||||
return;
|
||||
@ -2506,7 +2491,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
|
||||
aio_context = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(aio_context);
|
||||
|
||||
if (has_base) {
|
||||
if (base) {
|
||||
base_bs = bdrv_find_backing_image(bs, base);
|
||||
if (base_bs == NULL) {
|
||||
error_setg(errp, "Can't find '%s' in the backing chain", base);
|
||||
@ -2515,7 +2500,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
|
||||
assert(bdrv_get_aio_context(base_bs) == aio_context);
|
||||
}
|
||||
|
||||
if (has_base_node) {
|
||||
if (base_node) {
|
||||
base_bs = bdrv_lookup_bs(NULL, base_node, errp);
|
||||
if (!base_bs) {
|
||||
goto out;
|
||||
@ -2529,7 +2514,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
|
||||
bdrv_refresh_filename(base_bs);
|
||||
}
|
||||
|
||||
if (has_bottom) {
|
||||
if (bottom) {
|
||||
bottom_bs = bdrv_lookup_bs(NULL, bottom, errp);
|
||||
if (!bottom_bs) {
|
||||
goto out;
|
||||
@ -2554,7 +2539,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
|
||||
/*
|
||||
* Check for op blockers in the whole chain between bs and base (or bottom)
|
||||
*/
|
||||
iter_end = has_bottom ? bdrv_filter_or_cow_bs(bottom_bs) : base_bs;
|
||||
iter_end = bottom ? bdrv_filter_or_cow_bs(bottom_bs) : base_bs;
|
||||
for (iter = bs; iter && iter != iter_end;
|
||||
iter = bdrv_filter_or_cow_bs(iter))
|
||||
{
|
||||
@ -2565,7 +2550,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
|
||||
|
||||
/* if we are streaming the entire chain, the result will have no backing
|
||||
* file, and specifying one is therefore an error */
|
||||
if (base_bs == NULL && has_backing_file) {
|
||||
if (!base_bs && backing_file) {
|
||||
error_setg(errp, "backing file specified, but streaming the "
|
||||
"entire chain");
|
||||
goto out;
|
||||
@ -2578,7 +2563,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
|
||||
job_flags |= JOB_MANUAL_DISMISS;
|
||||
}
|
||||
|
||||
stream_start(has_job_id ? job_id : NULL, bs, base_bs, backing_file,
|
||||
stream_start(job_id, bs, base_bs, backing_file,
|
||||
bottom_bs, job_flags, has_speed ? speed : 0, on_error,
|
||||
filter_node_name, &local_err);
|
||||
if (local_err) {
|
||||
@ -2592,15 +2577,15 @@ out:
|
||||
aio_context_release(aio_context);
|
||||
}
|
||||
|
||||
void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
|
||||
bool has_base_node, const char *base_node,
|
||||
bool has_base, const char *base,
|
||||
bool has_top_node, const char *top_node,
|
||||
bool has_top, const char *top,
|
||||
bool has_backing_file, const char *backing_file,
|
||||
void qmp_block_commit(const char *job_id, const char *device,
|
||||
const char *base_node,
|
||||
const char *base,
|
||||
const char *top_node,
|
||||
const char *top,
|
||||
const char *backing_file,
|
||||
bool has_speed, int64_t speed,
|
||||
bool has_on_error, BlockdevOnError on_error,
|
||||
bool has_filter_node_name, const char *filter_node_name,
|
||||
const char *filter_node_name,
|
||||
bool has_auto_finalize, bool auto_finalize,
|
||||
bool has_auto_dismiss, bool auto_dismiss,
|
||||
Error **errp)
|
||||
@ -2619,9 +2604,6 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
|
||||
if (!has_on_error) {
|
||||
on_error = BLOCKDEV_ON_ERROR_REPORT;
|
||||
}
|
||||
if (!has_filter_node_name) {
|
||||
filter_node_name = NULL;
|
||||
}
|
||||
if (has_auto_finalize && !auto_finalize) {
|
||||
job_flags |= JOB_MANUAL_FINALIZE;
|
||||
}
|
||||
@ -2657,10 +2639,10 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
|
||||
/* default top_bs is the active layer */
|
||||
top_bs = bs;
|
||||
|
||||
if (has_top_node && has_top) {
|
||||
if (top_node && top) {
|
||||
error_setg(errp, "'top-node' and 'top' are mutually exclusive");
|
||||
goto out;
|
||||
} else if (has_top_node) {
|
||||
} else if (top_node) {
|
||||
top_bs = bdrv_lookup_bs(NULL, top_node, errp);
|
||||
if (top_bs == NULL) {
|
||||
goto out;
|
||||
@ -2670,7 +2652,7 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
|
||||
top_node);
|
||||
goto out;
|
||||
}
|
||||
} else if (has_top && top) {
|
||||
} else if (top) {
|
||||
/* This strcmp() is just a shortcut, there is no need to
|
||||
* refresh @bs's filename. If it mismatches,
|
||||
* bdrv_find_backing_image() will do the refresh and may still
|
||||
@ -2687,10 +2669,10 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
|
||||
|
||||
assert(bdrv_get_aio_context(top_bs) == aio_context);
|
||||
|
||||
if (has_base_node && has_base) {
|
||||
if (base_node && base) {
|
||||
error_setg(errp, "'base-node' and 'base' are mutually exclusive");
|
||||
goto out;
|
||||
} else if (has_base_node) {
|
||||
} else if (base_node) {
|
||||
base_bs = bdrv_lookup_bs(NULL, base_node, errp);
|
||||
if (base_bs == NULL) {
|
||||
goto out;
|
||||
@ -2700,7 +2682,7 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
|
||||
base_node);
|
||||
goto out;
|
||||
}
|
||||
} else if (has_base && base) {
|
||||
} else if (base) {
|
||||
base_bs = bdrv_find_backing_image(top_bs, base);
|
||||
if (base_bs == NULL) {
|
||||
error_setg(errp, "Can't find '%s' in the backing chain", base);
|
||||
@ -2742,7 +2724,7 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
|
||||
if (top_perm & BLK_PERM_WRITE ||
|
||||
bdrv_skip_filters(top_bs) == bdrv_skip_filters(bs))
|
||||
{
|
||||
if (has_backing_file) {
|
||||
if (backing_file) {
|
||||
if (bdrv_skip_filters(top_bs) == bdrv_skip_filters(bs)) {
|
||||
error_setg(errp, "'backing-file' specified,"
|
||||
" but 'top' is the active layer");
|
||||
@ -2752,7 +2734,7 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
if (!has_job_id) {
|
||||
if (!job_id) {
|
||||
/*
|
||||
* Emulate here what block_job_create() does, because it
|
||||
* is possible that @bs != @top_bs (the block job should
|
||||
@ -2768,8 +2750,8 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
|
||||
if (bdrv_op_is_blocked(overlay_bs, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) {
|
||||
goto out;
|
||||
}
|
||||
commit_start(has_job_id ? job_id : NULL, bs, base_bs, top_bs, job_flags,
|
||||
speed, on_error, has_backing_file ? backing_file : NULL,
|
||||
commit_start(job_id, bs, base_bs, top_bs, job_flags,
|
||||
speed, on_error, backing_file,
|
||||
filter_node_name, &local_err);
|
||||
}
|
||||
if (local_err != NULL) {
|
||||
@ -2802,9 +2784,6 @@ static BlockJob *do_backup_common(BackupCommon *backup,
|
||||
if (!backup->has_on_target_error) {
|
||||
backup->on_target_error = BLOCKDEV_ON_ERROR_REPORT;
|
||||
}
|
||||
if (!backup->has_job_id) {
|
||||
backup->job_id = NULL;
|
||||
}
|
||||
if (!backup->has_auto_finalize) {
|
||||
backup->auto_finalize = true;
|
||||
}
|
||||
@ -2830,7 +2809,7 @@ static BlockJob *do_backup_common(BackupCommon *backup,
|
||||
if ((backup->sync == MIRROR_SYNC_MODE_BITMAP) ||
|
||||
(backup->sync == MIRROR_SYNC_MODE_INCREMENTAL)) {
|
||||
/* done before desugaring 'incremental' to print the right message */
|
||||
if (!backup->has_bitmap) {
|
||||
if (!backup->bitmap) {
|
||||
error_setg(errp, "must provide a valid bitmap name for "
|
||||
"'%s' sync mode", MirrorSyncMode_str(backup->sync));
|
||||
return NULL;
|
||||
@ -2851,7 +2830,7 @@ static BlockJob *do_backup_common(BackupCommon *backup,
|
||||
backup->bitmap_mode = BITMAP_SYNC_MODE_ON_SUCCESS;
|
||||
}
|
||||
|
||||
if (backup->has_bitmap) {
|
||||
if (backup->bitmap) {
|
||||
bmap = bdrv_find_dirty_bitmap(bs, backup->bitmap);
|
||||
if (!bmap) {
|
||||
error_setg(errp, "Bitmap '%s' could not be found", backup->bitmap);
|
||||
@ -2884,7 +2863,7 @@ static BlockJob *do_backup_common(BackupCommon *backup,
|
||||
}
|
||||
}
|
||||
|
||||
if (!backup->has_bitmap && backup->has_bitmap_mode) {
|
||||
if (!backup->bitmap && backup->has_bitmap_mode) {
|
||||
error_setg(errp, "Cannot specify bitmap sync mode without a bitmap");
|
||||
return NULL;
|
||||
}
|
||||
@ -2944,7 +2923,7 @@ void qmp_blockdev_backup(BlockdevBackup *backup, Error **errp)
|
||||
**/
|
||||
static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
|
||||
BlockDriverState *target,
|
||||
bool has_replaces, const char *replaces,
|
||||
const char *replaces,
|
||||
enum MirrorSyncMode sync,
|
||||
BlockMirrorBackingMode backing_mode,
|
||||
bool zero_target,
|
||||
@ -2956,7 +2935,6 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
|
||||
bool has_on_target_error,
|
||||
BlockdevOnError on_target_error,
|
||||
bool has_unmap, bool unmap,
|
||||
bool has_filter_node_name,
|
||||
const char *filter_node_name,
|
||||
bool has_copy_mode, MirrorCopyMode copy_mode,
|
||||
bool has_auto_finalize, bool auto_finalize,
|
||||
@ -2984,9 +2962,6 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
|
||||
if (!has_unmap) {
|
||||
unmap = true;
|
||||
}
|
||||
if (!has_filter_node_name) {
|
||||
filter_node_name = NULL;
|
||||
}
|
||||
if (!has_copy_mode) {
|
||||
copy_mode = MIRROR_COPY_MODE_BACKGROUND;
|
||||
}
|
||||
@ -3019,16 +2994,15 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
|
||||
sync = MIRROR_SYNC_MODE_FULL;
|
||||
}
|
||||
|
||||
if (!has_replaces) {
|
||||
if (!replaces) {
|
||||
/* We want to mirror from @bs, but keep implicit filters on top */
|
||||
unfiltered_bs = bdrv_skip_implicit_filters(bs);
|
||||
if (unfiltered_bs != bs) {
|
||||
replaces = unfiltered_bs->node_name;
|
||||
has_replaces = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (has_replaces) {
|
||||
if (replaces) {
|
||||
BlockDriverState *to_replace_bs;
|
||||
AioContext *replace_aio_context;
|
||||
int64_t bs_size, replace_size;
|
||||
@ -3065,7 +3039,7 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
|
||||
* and will allow to check whether the node still exist at mirror completion
|
||||
*/
|
||||
mirror_start(job_id, bs, target,
|
||||
has_replaces ? replaces : NULL, job_flags,
|
||||
replaces, job_flags,
|
||||
speed, granularity, buf_size, sync, backing_mode, zero_target,
|
||||
on_source_error, on_target_error, unmap, filter_node_name,
|
||||
copy_mode, errp);
|
||||
@ -3103,7 +3077,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
|
||||
arg->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
|
||||
}
|
||||
|
||||
if (!arg->has_format) {
|
||||
if (!arg->format) {
|
||||
format = (arg->mode == NEW_IMAGE_MODE_EXISTING
|
||||
? NULL : bs->drv->format_name);
|
||||
}
|
||||
@ -3123,8 +3097,8 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (arg->has_replaces) {
|
||||
if (!arg->has_node_name) {
|
||||
if (arg->replaces) {
|
||||
if (!arg->node_name) {
|
||||
error_setg(errp, "a node-name must be provided when replacing a"
|
||||
" named node of the graph");
|
||||
goto out;
|
||||
@ -3174,7 +3148,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
|
||||
}
|
||||
|
||||
options = qdict_new();
|
||||
if (arg->has_node_name) {
|
||||
if (arg->node_name) {
|
||||
qdict_put_str(options, "node-name", arg->node_name);
|
||||
}
|
||||
if (format) {
|
||||
@ -3209,8 +3183,8 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
|
||||
aio_context_release(old_context);
|
||||
aio_context_acquire(aio_context);
|
||||
|
||||
blockdev_mirror_common(arg->has_job_id ? arg->job_id : NULL, bs, target_bs,
|
||||
arg->has_replaces, arg->replaces, arg->sync,
|
||||
blockdev_mirror_common(arg->job_id, bs, target_bs,
|
||||
arg->replaces, arg->sync,
|
||||
backing_mode, zero_target,
|
||||
arg->has_speed, arg->speed,
|
||||
arg->has_granularity, arg->granularity,
|
||||
@ -3218,7 +3192,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
|
||||
arg->has_on_source_error, arg->on_source_error,
|
||||
arg->has_on_target_error, arg->on_target_error,
|
||||
arg->has_unmap, arg->unmap,
|
||||
false, NULL,
|
||||
NULL,
|
||||
arg->has_copy_mode, arg->copy_mode,
|
||||
arg->has_auto_finalize, arg->auto_finalize,
|
||||
arg->has_auto_dismiss, arg->auto_dismiss,
|
||||
@ -3228,9 +3202,9 @@ out:
|
||||
aio_context_release(aio_context);
|
||||
}
|
||||
|
||||
void qmp_blockdev_mirror(bool has_job_id, const char *job_id,
|
||||
void qmp_blockdev_mirror(const char *job_id,
|
||||
const char *device, const char *target,
|
||||
bool has_replaces, const char *replaces,
|
||||
const char *replaces,
|
||||
MirrorSyncMode sync,
|
||||
bool has_speed, int64_t speed,
|
||||
bool has_granularity, uint32_t granularity,
|
||||
@ -3239,7 +3213,6 @@ void qmp_blockdev_mirror(bool has_job_id, const char *job_id,
|
||||
BlockdevOnError on_source_error,
|
||||
bool has_on_target_error,
|
||||
BlockdevOnError on_target_error,
|
||||
bool has_filter_node_name,
|
||||
const char *filter_node_name,
|
||||
bool has_copy_mode, MirrorCopyMode copy_mode,
|
||||
bool has_auto_finalize, bool auto_finalize,
|
||||
@ -3280,15 +3253,14 @@ void qmp_blockdev_mirror(bool has_job_id, const char *job_id,
|
||||
goto out;
|
||||
}
|
||||
|
||||
blockdev_mirror_common(has_job_id ? job_id : NULL, bs, target_bs,
|
||||
has_replaces, replaces, sync, backing_mode,
|
||||
blockdev_mirror_common(job_id, bs, target_bs,
|
||||
replaces, sync, backing_mode,
|
||||
zero_target, has_speed, speed,
|
||||
has_granularity, granularity,
|
||||
has_buf_size, buf_size,
|
||||
has_on_source_error, on_source_error,
|
||||
has_on_target_error, on_target_error,
|
||||
true, true,
|
||||
has_filter_node_name, filter_node_name,
|
||||
true, true, filter_node_name,
|
||||
has_copy_mode, copy_mode,
|
||||
has_auto_finalize, auto_finalize,
|
||||
has_auto_dismiss, auto_dismiss,
|
||||
@ -3547,8 +3519,6 @@ fail:
|
||||
void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp)
|
||||
{
|
||||
BlockReopenQueue *queue = NULL;
|
||||
GSList *drained = NULL;
|
||||
GSList *p;
|
||||
|
||||
/* Add each one of the BDS that we want to reopen to the queue */
|
||||
for (; reopen_list != NULL; reopen_list = reopen_list->next) {
|
||||
@ -3560,7 +3530,7 @@ void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp)
|
||||
QDict *qdict;
|
||||
|
||||
/* Check for the selected node name */
|
||||
if (!options->has_node_name) {
|
||||
if (!options->node_name) {
|
||||
error_setg(errp, "node-name not specified");
|
||||
goto fail;
|
||||
}
|
||||
@ -3585,9 +3555,7 @@ void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp)
|
||||
ctx = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(ctx);
|
||||
|
||||
bdrv_subtree_drained_begin(bs);
|
||||
queue = bdrv_reopen_queue(queue, bs, qdict, false);
|
||||
drained = g_slist_prepend(drained, bs);
|
||||
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
@ -3598,15 +3566,6 @@ void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp)
|
||||
|
||||
fail:
|
||||
bdrv_reopen_queue_free(queue);
|
||||
for (p = drained; p; p = p->next) {
|
||||
BlockDriverState *bs = p->data;
|
||||
AioContext *ctx = bdrv_get_aio_context(bs);
|
||||
|
||||
aio_context_acquire(ctx);
|
||||
bdrv_subtree_drained_end(bs);
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
g_slist_free(drained);
|
||||
}
|
||||
|
||||
void qmp_blockdev_del(const char *node_name, Error **errp)
|
||||
@ -3665,8 +3624,7 @@ static BdrvChild *bdrv_find_child(BlockDriverState *parent_bs,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void qmp_x_blockdev_change(const char *parent, bool has_child,
|
||||
const char *child, bool has_node,
|
||||
void qmp_x_blockdev_change(const char *parent, const char *child,
|
||||
const char *node, Error **errp)
|
||||
{
|
||||
BlockDriverState *parent_bs, *new_bs = NULL;
|
||||
@ -3677,8 +3635,8 @@ void qmp_x_blockdev_change(const char *parent, bool has_child,
|
||||
return;
|
||||
}
|
||||
|
||||
if (has_child == has_node) {
|
||||
if (has_child) {
|
||||
if (!child == !node) {
|
||||
if (child) {
|
||||
error_setg(errp, "The parameters child and node are in conflict");
|
||||
} else {
|
||||
error_setg(errp, "Either child or node must be specified");
|
||||
@ -3686,7 +3644,7 @@ void qmp_x_blockdev_change(const char *parent, bool has_child,
|
||||
return;
|
||||
}
|
||||
|
||||
if (has_child) {
|
||||
if (child) {
|
||||
p_child = bdrv_find_child(parent_bs, child);
|
||||
if (!p_child) {
|
||||
error_setg(errp, "Node '%s' does not have child '%s'",
|
||||
@ -3696,7 +3654,7 @@ void qmp_x_blockdev_change(const char *parent, bool has_child,
|
||||
bdrv_del_child(parent_bs, p_child, errp);
|
||||
}
|
||||
|
||||
if (has_node) {
|
||||
if (node) {
|
||||
new_bs = bdrv_find_node(node);
|
||||
if (!new_bs) {
|
||||
error_setg(errp, "Node '%s' not found", node);
|
||||
|
@ -120,7 +120,7 @@ static bool child_job_drained_poll(BdrvChild *c)
|
||||
}
|
||||
}
|
||||
|
||||
static void child_job_drained_end(BdrvChild *c, int *drained_end_counter)
|
||||
static void child_job_drained_end(BdrvChild *c)
|
||||
{
|
||||
BlockJob *job = c->opaque;
|
||||
job_resume(&job->job);
|
||||
@ -354,7 +354,6 @@ BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp)
|
||||
info->auto_finalize = job->job.auto_finalize;
|
||||
info->auto_dismiss = job->job.auto_dismiss;
|
||||
if (job->job.ret) {
|
||||
info->has_error = true;
|
||||
info->error = job->job.err ?
|
||||
g_strdup(error_get_pretty(job->job.err)) :
|
||||
g_strdup(strerror(-job->job.ret));
|
||||
@ -414,7 +413,6 @@ static void block_job_event_completed_locked(Notifier *n, void *opaque)
|
||||
progress_total,
|
||||
progress_current,
|
||||
job->speed,
|
||||
!!msg,
|
||||
msg);
|
||||
}
|
||||
|
||||
|
@ -156,7 +156,7 @@ static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
|
||||
--p; --tmp; --len;
|
||||
if (--offset < 0) {
|
||||
offset = p % TARGET_PAGE_SIZE;
|
||||
pag = (char *)page[p / TARGET_PAGE_SIZE];
|
||||
pag = page[p / TARGET_PAGE_SIZE];
|
||||
if (!pag) {
|
||||
pag = g_try_malloc0(TARGET_PAGE_SIZE);
|
||||
page[p / TARGET_PAGE_SIZE] = pag;
|
||||
|
@ -198,7 +198,7 @@ int qmp_chardev_open_file_source(char *src, int flags, Error **errp)
|
||||
{
|
||||
int fd = -1;
|
||||
|
||||
TFR(fd = qemu_open_old(src, flags, 0666));
|
||||
fd = RETRY_ON_EINTR(qemu_open_old(src, flags, 0666));
|
||||
if (fd == -1) {
|
||||
error_setg_file_open(errp, errno, src);
|
||||
}
|
||||
|
@ -45,7 +45,7 @@ static void qmp_chardev_open_file(Chardev *chr,
|
||||
DWORD accessmode;
|
||||
DWORD flags;
|
||||
|
||||
if (file->has_in) {
|
||||
if (file->in) {
|
||||
error_setg(errp, "input file not supported");
|
||||
return;
|
||||
}
|
||||
@ -83,7 +83,7 @@ static void qmp_chardev_open_file(Chardev *chr,
|
||||
return;
|
||||
}
|
||||
|
||||
if (file->has_in) {
|
||||
if (file->in) {
|
||||
flags = O_RDONLY;
|
||||
in = qmp_chardev_open_file_source(file->in, flags, errp);
|
||||
if (in < 0) {
|
||||
|
@ -238,7 +238,6 @@ static void qemu_chr_open_pp_fd(Chardev *chr,
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_CHARDEV_PARPORT
|
||||
static void qmp_chardev_open_parallel(Chardev *chr,
|
||||
ChardevBackend *backend,
|
||||
bool *be_opened,
|
||||
@ -276,29 +275,21 @@ static void char_parallel_class_init(ObjectClass *oc, void *data)
|
||||
|
||||
cc->parse = qemu_chr_parse_parallel;
|
||||
cc->open = qmp_chardev_open_parallel;
|
||||
#if defined(__linux__)
|
||||
cc->chr_ioctl = pp_ioctl;
|
||||
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
|
||||
defined(__DragonFly__)
|
||||
cc->chr_ioctl = pp_ioctl;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void char_parallel_finalize(Object *obj)
|
||||
{
|
||||
#if defined(__linux__)
|
||||
Chardev *chr = CHARDEV(obj);
|
||||
ParallelChardev *drv = PARALLEL_CHARDEV(chr);
|
||||
int fd = drv->fd;
|
||||
|
||||
#if defined(__linux__)
|
||||
pp_hw_mode(drv, IEEE1284_MODE_COMPAT);
|
||||
ioctl(fd, PPRELEASE);
|
||||
#endif
|
||||
close(fd);
|
||||
qemu_chr_be_event(chr, CHR_EVENT_CLOSED);
|
||||
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
|
||||
defined(__DragonFly__)
|
||||
/* FIXME: close fd? */
|
||||
#endif
|
||||
}
|
||||
|
||||
static const TypeInfo char_parallel_type_info = {
|
||||
@ -315,5 +306,3 @@ static void register_types(void)
|
||||
}
|
||||
|
||||
type_init(register_types);
|
||||
|
||||
#endif
|
||||
|
@ -131,8 +131,8 @@ static void qemu_chr_open_pipe(Chardev *chr,
|
||||
|
||||
filename_in = g_strdup_printf("%s.in", filename);
|
||||
filename_out = g_strdup_printf("%s.out", filename);
|
||||
TFR(fd_in = qemu_open_old(filename_in, O_RDWR | O_BINARY));
|
||||
TFR(fd_out = qemu_open_old(filename_out, O_RDWR | O_BINARY));
|
||||
fd_in = RETRY_ON_EINTR(qemu_open_old(filename_in, O_RDWR | O_BINARY));
|
||||
fd_out = RETRY_ON_EINTR(qemu_open_old(filename_out, O_RDWR | O_BINARY));
|
||||
g_free(filename_in);
|
||||
g_free(filename_out);
|
||||
if (fd_in < 0 || fd_out < 0) {
|
||||
@ -142,7 +142,9 @@ static void qemu_chr_open_pipe(Chardev *chr,
|
||||
if (fd_out >= 0) {
|
||||
close(fd_out);
|
||||
}
|
||||
TFR(fd_in = fd_out = qemu_open_old(filename, O_RDWR | O_BINARY));
|
||||
fd_in = fd_out = RETRY_ON_EINTR(
|
||||
qemu_open_old(filename, O_RDWR | O_BINARY)
|
||||
);
|
||||
if (fd_in < 0) {
|
||||
error_setg_file_open(errp, errno, filename);
|
||||
return;
|
||||
|
@ -93,9 +93,7 @@ static void pty_chr_update_read_handler(Chardev *chr)
|
||||
pfd.fd = fioc->fd;
|
||||
pfd.events = G_IO_OUT;
|
||||
pfd.revents = 0;
|
||||
do {
|
||||
rc = g_poll(&pfd, 1, 0);
|
||||
} while (rc == -1 && errno == EINTR);
|
||||
rc = RETRY_ON_EINTR(g_poll(&pfd, 1, 0));
|
||||
assert(rc >= 0);
|
||||
|
||||
if (pfd.revents & G_IO_HUP) {
|
||||
|
@ -1251,7 +1251,7 @@ static bool qmp_chardev_validate_socket(ChardevSocket *sock,
|
||||
"'fd' address type");
|
||||
return false;
|
||||
}
|
||||
if (sock->has_tls_creds &&
|
||||
if (sock->tls_creds &&
|
||||
!(sock->has_server && sock->server)) {
|
||||
error_setg(errp,
|
||||
"'tls_creds' option is incompatible with "
|
||||
@ -1261,7 +1261,7 @@ static bool qmp_chardev_validate_socket(ChardevSocket *sock,
|
||||
break;
|
||||
|
||||
case SOCKET_ADDRESS_TYPE_UNIX:
|
||||
if (sock->has_tls_creds) {
|
||||
if (sock->tls_creds) {
|
||||
error_setg(errp,
|
||||
"'tls_creds' option is incompatible with "
|
||||
"'unix' address type");
|
||||
@ -1273,7 +1273,7 @@ static bool qmp_chardev_validate_socket(ChardevSocket *sock,
|
||||
break;
|
||||
|
||||
case SOCKET_ADDRESS_TYPE_VSOCK:
|
||||
if (sock->has_tls_creds) {
|
||||
if (sock->tls_creds) {
|
||||
error_setg(errp,
|
||||
"'tls_creds' option is incompatible with "
|
||||
"'vsock' address type");
|
||||
@ -1284,7 +1284,7 @@ static bool qmp_chardev_validate_socket(ChardevSocket *sock,
|
||||
break;
|
||||
}
|
||||
|
||||
if (sock->has_tls_authz && !sock->has_tls_creds) {
|
||||
if (sock->tls_authz && !sock->tls_creds) {
|
||||
error_setg(errp, "'tls_authz' option requires 'tls_creds' option");
|
||||
return false;
|
||||
}
|
||||
@ -1465,9 +1465,7 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend,
|
||||
sock->wait = qemu_opt_get_bool(opts, "wait", true);
|
||||
sock->has_reconnect = qemu_opt_find(opts, "reconnect");
|
||||
sock->reconnect = qemu_opt_get_number(opts, "reconnect", 0);
|
||||
sock->has_tls_creds = qemu_opt_get(opts, "tls-creds");
|
||||
sock->tls_creds = g_strdup(qemu_opt_get(opts, "tls-creds"));
|
||||
sock->has_tls_authz = qemu_opt_get(opts, "tls-authz");
|
||||
sock->tls_authz = g_strdup(qemu_opt_get(opts, "tls-authz"));
|
||||
|
||||
addr = g_new0(SocketAddressLegacy, 1);
|
||||
|
@ -178,7 +178,6 @@ static void qemu_chr_parse_udp(QemuOpts *opts, ChardevBackend *backend,
|
||||
udp->remote = addr;
|
||||
|
||||
if (has_local) {
|
||||
udp->has_local = true;
|
||||
addr = g_new0(SocketAddressLegacy, 1);
|
||||
addr->type = SOCKET_ADDRESS_TYPE_INET;
|
||||
addr->u.inet.data = g_new(InetSocketAddress, 1);
|
||||
|
@ -240,7 +240,7 @@ static void qemu_char_open(Chardev *chr, ChardevBackend *backend,
|
||||
/* Any ChardevCommon member would work */
|
||||
ChardevCommon *common = backend ? backend->u.null.data : NULL;
|
||||
|
||||
if (common && common->has_logfile) {
|
||||
if (common && common->logfile) {
|
||||
int flags = O_WRONLY;
|
||||
if (common->has_logappend &&
|
||||
common->logappend) {
|
||||
@ -496,9 +496,7 @@ void qemu_chr_parse_common(QemuOpts *opts, ChardevCommon *backend)
|
||||
{
|
||||
const char *logfile = qemu_opt_get(opts, "logfile");
|
||||
|
||||
backend->has_logfile = logfile != NULL;
|
||||
backend->logfile = g_strdup(logfile);
|
||||
|
||||
backend->has_logappend = true;
|
||||
backend->logappend = qemu_opt_get_bool(opts, "logappend", false);
|
||||
}
|
||||
@ -532,19 +530,6 @@ static const ChardevClass *char_get_class(const char *driver, Error **errp)
|
||||
return cc;
|
||||
}
|
||||
|
||||
static struct ChardevAlias {
|
||||
const char *typename;
|
||||
const char *alias;
|
||||
bool deprecation_warning_printed;
|
||||
} chardev_alias_table[] = {
|
||||
#ifdef HAVE_CHARDEV_PARPORT
|
||||
{ "parallel", "parport" },
|
||||
#endif
|
||||
#ifdef HAVE_CHARDEV_SERIAL
|
||||
{ "serial", "tty" },
|
||||
#endif
|
||||
};
|
||||
|
||||
typedef struct ChadevClassFE {
|
||||
void (*fn)(const char *name, void *opaque);
|
||||
void *opaque;
|
||||
@ -580,28 +565,12 @@ help_string_append(const char *name, void *opaque)
|
||||
g_string_append_printf(str, "\n %s", name);
|
||||
}
|
||||
|
||||
static const char *chardev_alias_translate(const char *name)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < (int)ARRAY_SIZE(chardev_alias_table); i++) {
|
||||
if (g_strcmp0(chardev_alias_table[i].alias, name) == 0) {
|
||||
if (!chardev_alias_table[i].deprecation_warning_printed) {
|
||||
warn_report("The alias '%s' is deprecated, use '%s' instead",
|
||||
name, chardev_alias_table[i].typename);
|
||||
chardev_alias_table[i].deprecation_warning_printed = true;
|
||||
}
|
||||
return chardev_alias_table[i].typename;
|
||||
}
|
||||
}
|
||||
return name;
|
||||
}
|
||||
|
||||
ChardevBackend *qemu_chr_parse_opts(QemuOpts *opts, Error **errp)
|
||||
{
|
||||
Error *local_err = NULL;
|
||||
const ChardevClass *cc;
|
||||
ChardevBackend *backend = NULL;
|
||||
const char *name = chardev_alias_translate(qemu_opt_get(opts, "backend"));
|
||||
const char *name = qemu_opt_get(opts, "backend");
|
||||
|
||||
if (name == NULL) {
|
||||
error_setg(errp, "chardev: \"%s\" missing backend",
|
||||
@ -639,7 +608,7 @@ Chardev *qemu_chr_new_from_opts(QemuOpts *opts, GMainContext *context,
|
||||
const ChardevClass *cc;
|
||||
Chardev *chr = NULL;
|
||||
ChardevBackend *backend = NULL;
|
||||
const char *name = chardev_alias_translate(qemu_opt_get(opts, "backend"));
|
||||
const char *name = qemu_opt_get(opts, "backend");
|
||||
const char *id = qemu_opts_id(opts);
|
||||
char *bid = NULL;
|
||||
|
||||
@ -1057,7 +1026,6 @@ ChardevReturn *qmp_chardev_add(const char *id, ChardevBackend *backend,
|
||||
ret = g_new0(ChardevReturn, 1);
|
||||
if (CHARDEV_IS_PTY(chr)) {
|
||||
ret->pty = g_strdup(chr->filename + 4);
|
||||
ret->has_pty = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -1160,7 +1128,6 @@ ChardevReturn *qmp_chardev_change(const char *id, ChardevBackend *backend,
|
||||
ret = g_new0(ChardevReturn, 1);
|
||||
if (CHARDEV_IS_PTY(chr_new)) {
|
||||
ret->pty = g_strdup(chr_new->filename + 4);
|
||||
ret->has_pty = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -14,9 +14,12 @@ chardev_ss.add(files(
|
||||
))
|
||||
chardev_ss.add(when: 'CONFIG_POSIX', if_true: [files(
|
||||
'char-fd.c',
|
||||
'char-parallel.c',
|
||||
'char-pty.c',
|
||||
), util])
|
||||
if targetos in ['linux', 'gnu/kfreebsd', 'freebsd', 'dragonfly']
|
||||
chardev_ss.add(files('char-parallel.c'))
|
||||
endif
|
||||
|
||||
chardev_ss.add(when: 'CONFIG_WIN32', if_true: files(
|
||||
'char-console.c',
|
||||
'char-win-stdio.c',
|
||||
|
@ -30,6 +30,7 @@ CONFIG_COLLIE=y
|
||||
CONFIG_ASPEED_SOC=y
|
||||
CONFIG_NETDUINO2=y
|
||||
CONFIG_NETDUINOPLUS2=y
|
||||
CONFIG_OLIMEX_STM32_H405=y
|
||||
CONFIG_MPS2=y
|
||||
CONFIG_RASPI=y
|
||||
CONFIG_DIGIC=y
|
||||
|
@ -17,9 +17,7 @@ CONFIG_I8254=y
|
||||
CONFIG_PCSPK=y
|
||||
CONFIG_PCKBD=y
|
||||
CONFIG_FDC=y
|
||||
CONFIG_ACPI=y
|
||||
CONFIG_ACPI_PIIX4=y
|
||||
CONFIG_APM=y
|
||||
CONFIG_I8257=y
|
||||
CONFIG_PIIX4=y
|
||||
CONFIG_IDE_ISA=y
|
||||
@ -32,6 +30,5 @@ CONFIG_MIPS_ITU=y
|
||||
CONFIG_MALTA=y
|
||||
CONFIG_PCNET_PCI=y
|
||||
CONFIG_MIPSSIM=y
|
||||
CONFIG_ACPI_SMBUS=y
|
||||
CONFIG_SMBUS_EEPROM=y
|
||||
CONFIG_TEST_DEVICES=y
|
||||
|
101
configure
vendored
101
configure
vendored
@ -83,9 +83,10 @@ rm -f config.log
|
||||
# Print a helpful header at the top of config.log
|
||||
echo "# QEMU configure log $(date)" >> config.log
|
||||
printf "# Configured with:" >> config.log
|
||||
printf " '%s'" "$0" "$@" >> config.log
|
||||
echo >> config.log
|
||||
echo "#" >> config.log
|
||||
# repeat the invocation to log and stdout for CI
|
||||
invoke=$(printf " '%s'" "$0" "$@")
|
||||
test -n "$GITLAB_CI" && echo "configuring with: $invoke"
|
||||
{ echo "$invoke"; echo; echo "#"; } >> config.log
|
||||
|
||||
quote_sh() {
|
||||
printf "%s" "$1" | sed "s,','\\\\'',g; s,.*,'&',"
|
||||
@ -210,10 +211,6 @@ version_ge () {
|
||||
done
|
||||
}
|
||||
|
||||
glob() {
|
||||
eval test -z '"${1#'"$2"'}"'
|
||||
}
|
||||
|
||||
if printf %s\\n "$source_path" "$PWD" | grep -q "[[:space:]:]";
|
||||
then
|
||||
error_exit "main directory cannot contain spaces nor colons"
|
||||
@ -341,9 +338,6 @@ for opt do
|
||||
;;
|
||||
esac
|
||||
done
|
||||
# OS specific
|
||||
# Using uname is really, really broken. Once we have the right set of checks
|
||||
# we can eliminate its usage altogether.
|
||||
|
||||
# Preferred compiler:
|
||||
# ${CC} (if set)
|
||||
@ -372,6 +366,7 @@ smbd="$SMBD"
|
||||
strip="${STRIP-${cross_prefix}strip}"
|
||||
widl="${WIDL-${cross_prefix}widl}"
|
||||
windres="${WINDRES-${cross_prefix}windres}"
|
||||
windmc="${WINDMC-${cross_prefix}windmc}"
|
||||
pkg_config_exe="${PKG_CONFIG-${cross_prefix}pkg-config}"
|
||||
query_pkg_config() {
|
||||
"${pkg_config_exe}" ${QEMU_PKG_CONFIG_FLAGS} "$@"
|
||||
@ -385,8 +380,6 @@ sdl2_config="${SDL2_CONFIG-${cross_prefix}sdl2-config}"
|
||||
# 2s-complement style results. (Both clang and gcc agree that it
|
||||
# provides these semantics.)
|
||||
QEMU_CFLAGS="-fno-strict-aliasing -fno-common -fwrapv"
|
||||
QEMU_CFLAGS="-Wundef -Wwrite-strings -Wmissing-prototypes $QEMU_CFLAGS"
|
||||
QEMU_CFLAGS="-Wstrict-prototypes -Wredundant-decls $QEMU_CFLAGS"
|
||||
QEMU_CFLAGS="-D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE $QEMU_CFLAGS"
|
||||
|
||||
QEMU_LDFLAGS=
|
||||
@ -493,13 +486,6 @@ sunos)
|
||||
QEMU_CFLAGS="-D_XOPEN_SOURCE=600 $QEMU_CFLAGS"
|
||||
# needed for TIOCWIN* defines in termios.h
|
||||
QEMU_CFLAGS="-D__EXTENSIONS__ $QEMU_CFLAGS"
|
||||
# $(uname -m) returns i86pc even on an x86_64 box, so default based on isainfo
|
||||
# Note that this check is broken for cross-compilation: if you're
|
||||
# cross-compiling to one of these OSes then you'll need to specify
|
||||
# the correct CPU with the --cpu option.
|
||||
if test -z "$cpu" && test "$(isainfo -k)" = "amd64"; then
|
||||
cpu="x86_64"
|
||||
fi
|
||||
;;
|
||||
haiku)
|
||||
pie="no"
|
||||
@ -554,16 +540,21 @@ elif check_define __aarch64__ ; then
|
||||
elif check_define __loongarch64 ; then
|
||||
cpu="loongarch64"
|
||||
else
|
||||
# Using uname is really broken, but it is just a fallback for architectures
|
||||
# that are going to use TCI anyway
|
||||
cpu=$(uname -m)
|
||||
echo "WARNING: unrecognized host CPU, proceeding with 'uname -m' output '$cpu'"
|
||||
fi
|
||||
|
||||
# Normalise host CPU name, set multilib cflags
|
||||
# Normalise host CPU name and set multilib cflags. The canonicalization
|
||||
# isn't really necessary, because the architectures that we check for
|
||||
# should not hit the 'uname -m' case, but better safe than sorry.
|
||||
# Note that this case should only have supported host CPUs, not guests.
|
||||
case "$cpu" in
|
||||
armv*b|armv*l|arm)
|
||||
cpu="arm" ;;
|
||||
|
||||
i386|i486|i586|i686|i86pc|BePC)
|
||||
i386|i486|i586|i686)
|
||||
cpu="i386"
|
||||
CPU_CFLAGS="-m32" ;;
|
||||
x32)
|
||||
@ -638,7 +629,6 @@ if test "$mingw32" = "yes" ; then
|
||||
EXESUF=".exe"
|
||||
# MinGW needs -mthreads for TLS and macro _MT.
|
||||
CONFIGURE_CFLAGS="-mthreads $CONFIGURE_CFLAGS"
|
||||
write_c_skeleton;
|
||||
prefix="/qemu"
|
||||
bindir=""
|
||||
qemu_suffix=""
|
||||
@ -870,17 +860,6 @@ for opt do
|
||||
;;
|
||||
--with-coroutine=*) coroutine="$optarg"
|
||||
;;
|
||||
--disable-zlib-test)
|
||||
;;
|
||||
--disable-virtio-blk-data-plane|--enable-virtio-blk-data-plane)
|
||||
echo "$0: $opt is obsolete, virtio-blk data-plane is always on" >&2
|
||||
;;
|
||||
--enable-vhdx|--disable-vhdx)
|
||||
echo "$0: $opt is obsolete, VHDX driver is always built" >&2
|
||||
;;
|
||||
--enable-uuid|--disable-uuid)
|
||||
echo "$0: $opt is obsolete, UUID support is always built" >&2
|
||||
;;
|
||||
--with-git=*) git="$optarg"
|
||||
;;
|
||||
--with-git-submodules=*)
|
||||
@ -900,19 +879,10 @@ for opt do
|
||||
;;
|
||||
--gdb=*) gdb_bin="$optarg"
|
||||
;;
|
||||
# backwards compatibility options
|
||||
--enable-trace-backend=*) meson_option_parse "--enable-trace-backends=$optarg" "$optarg"
|
||||
;;
|
||||
--disable-blobs) meson_option_parse --disable-install-blobs ""
|
||||
;;
|
||||
--enable-vfio-user-server) vfio_user_server="enabled"
|
||||
;;
|
||||
--disable-vfio-user-server) vfio_user_server="disabled"
|
||||
;;
|
||||
--enable-tcmalloc) meson_option_parse --enable-malloc=tcmalloc tcmalloc
|
||||
;;
|
||||
--enable-jemalloc) meson_option_parse --enable-malloc=jemalloc jemalloc
|
||||
;;
|
||||
--as-shared-lib)
|
||||
as_shared_lib="yes"
|
||||
QEMU_CFLAGS="$QEMU_CFLAGS -fPIC -DAS_LIB=1"
|
||||
@ -1223,6 +1193,11 @@ fi
|
||||
# just silently disable some features, so it's too error prone.
|
||||
|
||||
warn_flags=
|
||||
add_to warn_flags -Wundef
|
||||
add_to warn_flags -Wwrite-strings
|
||||
add_to warn_flags -Wmissing-prototypes
|
||||
add_to warn_flags -Wstrict-prototypes
|
||||
add_to warn_flags -Wredundant-decls
|
||||
add_to warn_flags -Wold-style-declaration
|
||||
add_to warn_flags -Wold-style-definition
|
||||
add_to warn_flags -Wtype-limits
|
||||
@ -1235,6 +1210,7 @@ add_to warn_flags -Wnested-externs
|
||||
add_to warn_flags -Wendif-labels
|
||||
add_to warn_flags -Wexpansion-to-defined
|
||||
add_to warn_flags -Wimplicit-fallthrough=2
|
||||
add_to warn_flags -Wmissing-format-attribute
|
||||
|
||||
nowarn_flags=
|
||||
add_to nowarn_flags -Wno-initializer-overrides
|
||||
@ -1908,9 +1884,7 @@ probe_target_compiler() {
|
||||
# We shall skip configuring the target compiler if the user didn't
|
||||
# bother enabling an appropriate guest. This avoids building
|
||||
# extraneous firmware images and tests.
|
||||
if test "${target_list#*$1}" != "$1"; then
|
||||
break;
|
||||
else
|
||||
if test "${target_list#*$1}" = "$1"; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
@ -2264,20 +2238,6 @@ if test "$have_ubsan" = "yes"; then
|
||||
QEMU_LDFLAGS="-fsanitize=undefined $QEMU_LDFLAGS"
|
||||
fi
|
||||
|
||||
##########################################
|
||||
# Guest agent Windows MSI package
|
||||
|
||||
if test "$QEMU_GA_MANUFACTURER" = ""; then
|
||||
QEMU_GA_MANUFACTURER=QEMU
|
||||
fi
|
||||
if test "$QEMU_GA_DISTRO" = ""; then
|
||||
QEMU_GA_DISTRO=Linux
|
||||
fi
|
||||
if test "$QEMU_GA_VERSION" = ""; then
|
||||
QEMU_GA_VERSION=$(cat "$source_path"/VERSION)
|
||||
fi
|
||||
|
||||
|
||||
#######################################
|
||||
# cross-compiled firmware targets
|
||||
|
||||
@ -2373,9 +2333,9 @@ if test "$debug_tcg" = "yes" ; then
|
||||
fi
|
||||
if test "$mingw32" = "yes" ; then
|
||||
echo "CONFIG_WIN32=y" >> $config_host_mak
|
||||
echo "QEMU_GA_MANUFACTURER=${QEMU_GA_MANUFACTURER}" >> $config_host_mak
|
||||
echo "QEMU_GA_DISTRO=${QEMU_GA_DISTRO}" >> $config_host_mak
|
||||
echo "QEMU_GA_VERSION=${QEMU_GA_VERSION}" >> $config_host_mak
|
||||
echo "QEMU_GA_MANUFACTURER=${QEMU_GA_MANUFACTURER-QEMU}" >> $config_host_mak
|
||||
echo "QEMU_GA_DISTRO=${QEMU_GA_DISTRO-Linux}" >> $config_host_mak
|
||||
echo "QEMU_GA_VERSION=${QEMU_GA_VERSION-$(cat "$source_path"/VERSION)}" >> $config_host_mak
|
||||
else
|
||||
echo "CONFIG_POSIX=y" >> $config_host_mak
|
||||
fi
|
||||
@ -2451,7 +2411,7 @@ echo "QEMU_OBJCFLAGS=$QEMU_OBJCFLAGS" >> $config_host_mak
|
||||
echo "GLIB_CFLAGS=$glib_cflags" >> $config_host_mak
|
||||
echo "GLIB_LIBS=$glib_libs" >> $config_host_mak
|
||||
echo "GLIB_BINDIR=$glib_bindir" >> $config_host_mak
|
||||
echo "GLIB_VERSION=$(pkg-config --modversion glib-2.0)" >> $config_host_mak
|
||||
echo "GLIB_VERSION=$($pkg_config --modversion glib-2.0)" >> $config_host_mak
|
||||
echo "QEMU_LDFLAGS=$QEMU_LDFLAGS" >> $config_host_mak
|
||||
echo "EXESUF=$EXESUF" >> $config_host_mak
|
||||
|
||||
@ -2596,6 +2556,7 @@ if test "$skip_meson" = no; then
|
||||
echo "strip = [$(meson_quote $strip)]" >> $cross
|
||||
echo "widl = [$(meson_quote $widl)]" >> $cross
|
||||
echo "windres = [$(meson_quote $windres)]" >> $cross
|
||||
echo "windmc = [$(meson_quote $windmc)]" >> $cross
|
||||
if test "$cross_compile" = "yes"; then
|
||||
cross_arg="--cross-file config-meson.cross"
|
||||
echo "[host_machine]" >> $cross
|
||||
@ -2642,16 +2603,6 @@ if test "$skip_meson" = no; then
|
||||
if test "$?" -ne 0 ; then
|
||||
error_exit "meson setup failed"
|
||||
fi
|
||||
else
|
||||
if test -f meson-private/cmd_line.txt; then
|
||||
# Adjust old command line options whose type was changed
|
||||
# Avoids having to use "setup --wipe" when Meson is upgraded
|
||||
perl -i -ne '
|
||||
s/^gettext = true$/gettext = auto/;
|
||||
s/^gettext = false$/gettext = disabled/;
|
||||
/^b_staticpic/ && next;
|
||||
print;' meson-private/cmd_line.txt
|
||||
fi
|
||||
fi
|
||||
|
||||
# Save the configure command line for later reuse.
|
||||
@ -2697,11 +2648,15 @@ preserve_env PKG_CONFIG
|
||||
preserve_env PKG_CONFIG_LIBDIR
|
||||
preserve_env PKG_CONFIG_PATH
|
||||
preserve_env PYTHON
|
||||
preserve_env QEMU_GA_MANUFACTURER
|
||||
preserve_env QEMU_GA_DISTRO
|
||||
preserve_env QEMU_GA_VERSION
|
||||
preserve_env SDL2_CONFIG
|
||||
preserve_env SMBD
|
||||
preserve_env STRIP
|
||||
preserve_env WIDL
|
||||
preserve_env WINDRES
|
||||
preserve_env WINDMC
|
||||
|
||||
printf "exec" >>config.status
|
||||
for i in "$0" "$@"; do
|
||||
|
@ -405,7 +405,7 @@ static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
|
||||
g_mutex_lock(&l1_dcache_locks[cache_idx]);
|
||||
hit_in_l1 = access_cache(l1_dcaches[cache_idx], effective_addr);
|
||||
if (!hit_in_l1) {
|
||||
insn = (InsnData *) userdata;
|
||||
insn = userdata;
|
||||
__atomic_fetch_add(&insn->l1_dmisses, 1, __ATOMIC_SEQ_CST);
|
||||
l1_dcaches[cache_idx]->misses++;
|
||||
}
|
||||
@ -419,7 +419,7 @@ static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
|
||||
|
||||
g_mutex_lock(&l2_ucache_locks[cache_idx]);
|
||||
if (!access_cache(l2_ucaches[cache_idx], effective_addr)) {
|
||||
insn = (InsnData *) userdata;
|
||||
insn = userdata;
|
||||
__atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST);
|
||||
l2_ucaches[cache_idx]->misses++;
|
||||
}
|
||||
@ -440,7 +440,7 @@ static void vcpu_insn_exec(unsigned int vcpu_index, void *userdata)
|
||||
g_mutex_lock(&l1_icache_locks[cache_idx]);
|
||||
hit_in_l1 = access_cache(l1_icaches[cache_idx], insn_addr);
|
||||
if (!hit_in_l1) {
|
||||
insn = (InsnData *) userdata;
|
||||
insn = userdata;
|
||||
__atomic_fetch_add(&insn->l1_imisses, 1, __ATOMIC_SEQ_CST);
|
||||
l1_icaches[cache_idx]->misses++;
|
||||
}
|
||||
@ -454,7 +454,7 @@ static void vcpu_insn_exec(unsigned int vcpu_index, void *userdata)
|
||||
|
||||
g_mutex_lock(&l2_ucache_locks[cache_idx]);
|
||||
if (!access_cache(l2_ucaches[cache_idx], insn_addr)) {
|
||||
insn = (InsnData *) userdata;
|
||||
insn = userdata;
|
||||
__atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST);
|
||||
l2_ucaches[cache_idx]->misses++;
|
||||
}
|
||||
|
@ -193,7 +193,7 @@ vub_discard_write_zeroes(VubReq *req, struct iovec *iov, uint32_t iovcnt,
|
||||
|
||||
#if defined(__linux__) && defined(BLKDISCARD) && defined(BLKZEROOUT)
|
||||
VubDev *vdev_blk = req->vdev_blk;
|
||||
desc = (struct virtio_blk_discard_write_zeroes *)buf;
|
||||
desc = buf;
|
||||
uint64_t range[2] = { le64toh(desc->sector) << 9,
|
||||
le32toh(desc->num_sectors) << 9 };
|
||||
if (type == VIRTIO_BLK_T_DISCARD) {
|
||||
@ -532,9 +532,9 @@ vub_get_blocksize(int fd)
|
||||
static void
|
||||
vub_initialize_config(int fd, struct virtio_blk_config *config)
|
||||
{
|
||||
off64_t capacity;
|
||||
off_t capacity;
|
||||
|
||||
capacity = lseek64(fd, 0, SEEK_END);
|
||||
capacity = lseek(fd, 0, SEEK_END);
|
||||
config->capacity = capacity >> 9;
|
||||
config->blk_size = vub_get_blocksize(fd);
|
||||
config->size_max = 65536;
|
||||
|
@ -1597,13 +1597,13 @@ qcrypto_block_luks_amend_add_keyslot(QCryptoBlock *block,
|
||||
g_autofree char *new_password = NULL;
|
||||
g_autofree uint8_t *master_key = NULL;
|
||||
|
||||
char *secret = opts_luks->has_secret ? opts_luks->secret : luks->secret;
|
||||
char *secret = opts_luks->secret ?: luks->secret;
|
||||
|
||||
if (!opts_luks->has_new_secret) {
|
||||
if (!opts_luks->new_secret) {
|
||||
error_setg(errp, "'new-secret' is required to activate a keyslot");
|
||||
return -1;
|
||||
}
|
||||
if (opts_luks->has_old_secret) {
|
||||
if (opts_luks->old_secret) {
|
||||
error_setg(errp,
|
||||
"'old-secret' must not be given when activating keyslots");
|
||||
return -1;
|
||||
@ -1677,7 +1677,7 @@ qcrypto_block_luks_amend_erase_keyslots(QCryptoBlock *block,
|
||||
g_autofree uint8_t *tmpkey = NULL;
|
||||
g_autofree char *old_password = NULL;
|
||||
|
||||
if (opts_luks->has_new_secret) {
|
||||
if (opts_luks->new_secret) {
|
||||
error_setg(errp,
|
||||
"'new-secret' must not be given when erasing keyslots");
|
||||
return -1;
|
||||
@ -1687,14 +1687,14 @@ qcrypto_block_luks_amend_erase_keyslots(QCryptoBlock *block,
|
||||
"'iter-time' must not be given when erasing keyslots");
|
||||
return -1;
|
||||
}
|
||||
if (opts_luks->has_secret) {
|
||||
if (opts_luks->secret) {
|
||||
error_setg(errp,
|
||||
"'secret' must not be given when erasing keyslots");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Load the old password if given */
|
||||
if (opts_luks->has_old_secret) {
|
||||
if (opts_luks->old_secret) {
|
||||
old_password = qcrypto_secret_lookup_as_utf8(opts_luks->old_secret,
|
||||
errp);
|
||||
if (!old_password) {
|
||||
@ -1719,7 +1719,7 @@ qcrypto_block_luks_amend_erase_keyslots(QCryptoBlock *block,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (opts_luks->has_old_secret) {
|
||||
if (opts_luks->old_secret) {
|
||||
int rv = qcrypto_block_luks_load_key(block,
|
||||
keyslot,
|
||||
old_password,
|
||||
@ -1761,7 +1761,7 @@ qcrypto_block_luks_amend_erase_keyslots(QCryptoBlock *block,
|
||||
}
|
||||
|
||||
/* Erase all keyslots that match the given old password */
|
||||
} else if (opts_luks->has_old_secret) {
|
||||
} else if (opts_luks->old_secret) {
|
||||
|
||||
unsigned long slots_to_erase_bitmap = 0;
|
||||
size_t i;
|
||||
|
3
disas.c
3
disas.c
@ -239,7 +239,8 @@ void target_disas(FILE *out, CPUState *cpu, target_ulong code,
|
||||
}
|
||||
}
|
||||
|
||||
static int gstring_printf(FILE *stream, const char *fmt, ...)
|
||||
static int G_GNUC_PRINTF(2, 3)
|
||||
gstring_printf(FILE *stream, const char *fmt, ...)
|
||||
{
|
||||
/* We abuse the FILE parameter to pass a GString. */
|
||||
GString *s = (GString *)stream;
|
||||
|
@ -39,12 +39,6 @@ should specify an ``audiodev=`` property. Additionally, when using
|
||||
vnc, you should specify an ``audiodev=`` property if you plan to
|
||||
transmit audio through the VNC protocol.
|
||||
|
||||
``-chardev`` backend aliases ``tty`` and ``parport`` (since 6.0)
|
||||
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
``tty`` and ``parport`` are aliases that will be removed. Instead, the
|
||||
actual backend names ``serial`` and ``parallel`` should be used.
|
||||
|
||||
Short-form boolean options (since 6.0)
|
||||
''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
@ -58,21 +52,6 @@ and will cause a warning.
|
||||
The replacement for the ``nodelay`` short-form boolean option is ``nodelay=on``
|
||||
rather than ``delay=off``.
|
||||
|
||||
Userspace local APIC with KVM (x86, since 6.0)
|
||||
''''''''''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
Using ``-M kernel-irqchip=off`` with x86 machine types that include a local
|
||||
APIC is deprecated. The ``split`` setting is supported, as is using
|
||||
``-M kernel-irqchip=off`` with the ISA PC machine type.
|
||||
|
||||
hexadecimal sizes with scaling multipliers (since 6.0)
|
||||
''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
Input parameters that take a size value should only use a size suffix
|
||||
(such as 'k' or 'M') when the base is written in decimal, and not when
|
||||
the value is hexadecimal. That is, '0x20M' is deprecated, and should
|
||||
be written either as '32M' or as '0x2000000'.
|
||||
|
||||
``-spice password=string`` (since 6.0)
|
||||
''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
@ -114,6 +93,12 @@ form is preferred.
|
||||
Using ``-drive if=none`` to configure the OTP device of the sifive_u
|
||||
RISC-V machine is deprecated. Use ``-drive if=pflash`` instead.
|
||||
|
||||
``-no-hpet`` (since 8.0)
|
||||
''''''''''''''''''''''''
|
||||
|
||||
The HPET setting has been turned into a machine property.
|
||||
Use ``-machine hpet=off`` instead.
|
||||
|
||||
|
||||
QEMU Machine Protocol (QMP) commands
|
||||
------------------------------------
|
||||
@ -186,19 +171,6 @@ accepted incorrect commands will return an error. Users should make sure that
|
||||
all arguments passed to ``device_add`` are consistent with the documented
|
||||
property types.
|
||||
|
||||
``query-sgx`` return value member ``section-size`` (since 7.0)
|
||||
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
Member ``section-size`` in return value elements with meta-type ``uint64`` is
|
||||
deprecated. Use ``sections`` instead.
|
||||
|
||||
|
||||
``query-sgx-capabilities`` return value member ``section-size`` (since 7.0)
|
||||
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
Member ``section-size`` in return value elements with meta-type ``uint64`` is
|
||||
deprecated. Use ``sections`` instead.
|
||||
|
||||
System accelerators
|
||||
-------------------
|
||||
|
||||
|
@ -408,6 +408,19 @@ pcspk-audiodev=<name>``.
|
||||
|
||||
Use ``-device`` instead.
|
||||
|
||||
Hexadecimal sizes with scaling multipliers (since 8.0)
|
||||
''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
Input parameters that take a size value should only use a size suffix
|
||||
(such as 'k' or 'M') when the base is written in decimal, and not when
|
||||
the value is hexadecimal. That is, '0x20M' should be written either as
|
||||
'32M' or as '0x2000000'.
|
||||
|
||||
``-chardev`` backend aliases ``tty`` and ``parport`` (removed in 8.0)
|
||||
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
``tty`` and ``parport`` used to be aliases for ``serial`` and ``parallel``
|
||||
respectively. The actual backend names should be used instead.
|
||||
|
||||
QEMU Machine Protocol (QMP) commands
|
||||
------------------------------------
|
||||
@ -494,6 +507,19 @@ type of array items in query-named-block-nodes.
|
||||
|
||||
Specify the properties for the object as top-level arguments instead.
|
||||
|
||||
``query-sgx`` return value member ``section-size`` (removed in 8.0)
|
||||
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
Member ``section-size`` in the return value of ``query-sgx``
|
||||
was superseded by ``sections``.
|
||||
|
||||
|
||||
``query-sgx-capabilities`` return value member ``section-size`` (removed in 8.0)
|
||||
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
Member ``section-size`` in the return value of ``query-sgx-capabilities``
|
||||
was superseded by ``sections``.
|
||||
|
||||
Human Monitor Protocol (HMP) commands
|
||||
-------------------------------------
|
||||
|
||||
@ -565,9 +591,8 @@ KVM guest support on 32-bit Arm hosts (removed in 5.2)
|
||||
''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
The Linux kernel has dropped support for allowing 32-bit Arm systems
|
||||
to host KVM guests as of the 5.7 kernel. Accordingly, QEMU is deprecating
|
||||
its support for this configuration and will remove it in a future version.
|
||||
Running 32-bit guests on a 64-bit Arm host remains supported.
|
||||
to host KVM guests as of the 5.7 kernel, and was thus removed from QEMU
|
||||
as well. Running 32-bit guests on a 64-bit Arm host remains supported.
|
||||
|
||||
RISC-V ISA Specific CPUs (removed in 5.1)
|
||||
'''''''''''''''''''''''''''''''''''''''''
|
||||
@ -617,6 +642,16 @@ x86 ``Icelake-Client`` CPU (removed in 7.1)
|
||||
There isn't ever Icelake Client CPU, it is some wrong and imaginary one.
|
||||
Use ``Icelake-Server`` instead.
|
||||
|
||||
System accelerators
|
||||
-------------------
|
||||
|
||||
Userspace local APIC with KVM (x86, removed 8.0)
|
||||
''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
``-M kernel-irqchip=off`` cannot be used on KVM if the CPU model includes
|
||||
a local APIC. The ``split`` setting is supported, as is using ``-M
|
||||
kernel-irqchip=off`` when the CPU does not have a local APIC.
|
||||
|
||||
System emulator machines
|
||||
------------------------
|
||||
|
||||
|
@ -52,6 +52,9 @@ Under ``tests/avocado/`` as the root we have:
|
||||
for their tests. In order to enable debugging, you can set **V=1**
|
||||
environment variable. This enables verbose mode for the test and also dumps
|
||||
the entire log from bios bits and more information in case failure happens.
|
||||
You can also set **BITS_DEBUG=1** to turn on debug mode. It will enable
|
||||
verbose logs and also retain the temporary work directory the test used for
|
||||
you to inspect and run the specific commands manually.
|
||||
|
||||
In order to run this test, please perform the following steps from the QEMU
|
||||
build directory:
|
||||
|
@ -1,3 +1,5 @@
|
||||
.. _atomics-ref:
|
||||
|
||||
=========================
|
||||
Atomic operations in QEMU
|
||||
=========================
|
||||
|
@ -26,12 +26,12 @@ called ``bdrv_foo(<same args>)``. In this case the script can help. To
|
||||
trigger the generation:
|
||||
|
||||
1. You need ``bdrv_foo`` declaration somewhere (for example, in
|
||||
``block/coroutines.h``) with the ``generated_co_wrapper`` mark,
|
||||
``block/coroutines.h``) with the ``co_wrapper`` mark,
|
||||
like this:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int generated_co_wrapper bdrv_foo(<some args>);
|
||||
int co_wrapper bdrv_foo(<some args>);
|
||||
|
||||
2. You need to feed this declaration to block-coroutine-wrapper script.
|
||||
For this, add the .h (or .c) file with the declaration to the
|
||||
@ -46,7 +46,7 @@ Links
|
||||
|
||||
1. The script location is ``scripts/block-coroutine-wrapper.py``.
|
||||
|
||||
2. Generic place for private ``generated_co_wrapper`` declarations is
|
||||
2. Generic place for private ``co_wrapper`` declarations is
|
||||
``block/coroutines.h``, for public declarations:
|
||||
``include/block/block.h``
|
||||
|
||||
|
@ -9,6 +9,7 @@ are only implementing things for HW accelerated hypervisors.
|
||||
:maxdepth: 2
|
||||
|
||||
tcg
|
||||
tcg-ops
|
||||
decodetree
|
||||
multi-thread-tcg
|
||||
tcg-icount
|
||||
|
@ -1357,7 +1357,7 @@ qmp_my_command(); everything else is produced by the generator. ::
|
||||
|
||||
$ cat example-schema.json
|
||||
{ 'struct': 'UserDefOne',
|
||||
'data': { 'integer': 'int', '*string': 'str' } }
|
||||
'data': { 'integer': 'int', '*string': 'str', '*flag': 'bool' } }
|
||||
|
||||
{ 'command': 'my-command',
|
||||
'data': { 'arg1': ['UserDefOne'] },
|
||||
@ -1410,8 +1410,9 @@ Example::
|
||||
|
||||
struct UserDefOne {
|
||||
int64_t integer;
|
||||
bool has_string;
|
||||
char *string;
|
||||
bool has_flag;
|
||||
bool flag;
|
||||
};
|
||||
|
||||
void qapi_free_UserDefOne(UserDefOne *obj);
|
||||
@ -1523,14 +1524,21 @@ Example::
|
||||
|
||||
bool visit_type_UserDefOne_members(Visitor *v, UserDefOne *obj, Error **errp)
|
||||
{
|
||||
bool has_string = !!obj->string;
|
||||
|
||||
if (!visit_type_int(v, "integer", &obj->integer, errp)) {
|
||||
return false;
|
||||
}
|
||||
if (visit_optional(v, "string", &obj->has_string)) {
|
||||
if (visit_optional(v, "string", &has_string)) {
|
||||
if (!visit_type_str(v, "string", &obj->string, errp)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (visit_optional(v, "flag", &obj->has_flag)) {
|
||||
if (!visit_type_bool(v, "flag", &obj->flag, errp)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1664,7 +1672,6 @@ Example::
|
||||
$ cat qapi-generated/example-qapi-commands.c
|
||||
[Uninteresting stuff omitted...]
|
||||
|
||||
|
||||
static void qmp_marshal_output_UserDefOne(UserDefOne *ret_in,
|
||||
QObject **ret_out, Error **errp)
|
||||
{
|
||||
@ -1748,7 +1755,7 @@ Example::
|
||||
QTAILQ_INIT(cmds);
|
||||
|
||||
qmp_register_command(cmds, "my-command",
|
||||
qmp_marshal_my_command, QCO_NO_OPTIONS);
|
||||
qmp_marshal_my_command, 0, 0);
|
||||
}
|
||||
[Uninteresting stuff omitted...]
|
||||
|
||||
@ -1917,6 +1924,12 @@ Example::
|
||||
{ "type", QLIT_QSTR("str"), },
|
||||
{}
|
||||
})),
|
||||
QLIT_QDICT(((QLitDictEntry[]) {
|
||||
{ "default", QLIT_QNULL, },
|
||||
{ "name", QLIT_QSTR("flag"), },
|
||||
{ "type", QLIT_QSTR("bool"), },
|
||||
{}
|
||||
})),
|
||||
{}
|
||||
})), },
|
||||
{ "meta-type", QLIT_QSTR("object"), },
|
||||
@ -1950,6 +1963,12 @@ Example::
|
||||
{ "name", QLIT_QSTR("str"), },
|
||||
{}
|
||||
})),
|
||||
QLIT_QDICT(((QLitDictEntry[]) {
|
||||
{ "json-type", QLIT_QSTR("boolean"), },
|
||||
{ "meta-type", QLIT_QSTR("builtin"), },
|
||||
{ "name", QLIT_QSTR("bool"), },
|
||||
{}
|
||||
})),
|
||||
{}
|
||||
}));
|
||||
|
||||
|
@ -293,6 +293,13 @@ that QEMU depends on.
|
||||
Do not include "qemu/osdep.h" from header files since the .c file will have
|
||||
already included it.
|
||||
|
||||
Headers should normally include everything they need beyond osdep.h.
|
||||
If exceptions are needed for some reason, they must be documented in
|
||||
the header. If all that's needed from a header is typedefs, consider
|
||||
putting those into qemu/typedefs.h instead of including the header.
|
||||
|
||||
Cyclic inclusion is forbidden.
|
||||
|
||||
C types
|
||||
=======
|
||||
|
||||
|
941
docs/devel/tcg-ops.rst
Normal file
941
docs/devel/tcg-ops.rst
Normal file
@ -0,0 +1,941 @@
|
||||
.. _tcg-ops-ref:
|
||||
|
||||
*******************************
|
||||
TCG Intermediate Representation
|
||||
*******************************
|
||||
|
||||
Introduction
|
||||
============
|
||||
|
||||
TCG (Tiny Code Generator) began as a generic backend for a C
|
||||
compiler. It was simplified to be used in QEMU. It also has its roots
|
||||
in the QOP code generator written by Paul Brook.
|
||||
|
||||
Definitions
|
||||
===========
|
||||
|
||||
TCG receives RISC-like *TCG ops* and performs some optimizations on them,
|
||||
including liveness analysis and trivial constant expression
|
||||
evaluation. TCG ops are then implemented in the host CPU back end,
|
||||
also known as the TCG target.
|
||||
|
||||
The TCG *target* is the architecture for which we generate the
|
||||
code. It is of course not the same as the "target" of QEMU which is
|
||||
the emulated architecture. As TCG started as a generic C backend used
|
||||
for cross compiling, it is assumed that the TCG target is different
|
||||
from the host, although it is never the case for QEMU.
|
||||
|
||||
In this document, we use *guest* to specify what architecture we are
|
||||
emulating; *target* always means the TCG target, the machine on which
|
||||
we are running QEMU.
|
||||
|
||||
A TCG *function* corresponds to a QEMU Translated Block (TB).
|
||||
|
||||
A TCG *temporary* is a variable only live in a basic block. Temporaries are allocated explicitly in each function.
|
||||
|
||||
A TCG *local temporary* is a variable only live in a function. Local temporaries are allocated explicitly in each function.
|
||||
|
||||
A TCG *global* is a variable which is live in all the functions
|
||||
(equivalent of a C global variable). They are defined before the
|
||||
functions are defined. A TCG global can be a memory location (e.g. a QEMU
|
||||
CPU register), a fixed host register (e.g. the QEMU CPU state pointer)
|
||||
or a memory location which is stored in a register outside QEMU TBs
|
||||
(not implemented yet).
|
||||
|
||||
A TCG *basic block* corresponds to a list of instructions terminated
|
||||
by a branch instruction.
|
||||
|
||||
An operation with *undefined behavior* may result in a crash.
|
||||
|
||||
An operation with *unspecified behavior* shall not crash. However,
|
||||
the result may be one of several possibilities so may be considered
|
||||
an *undefined result*.
|
||||
|
||||
Intermediate representation
|
||||
===========================
|
||||
|
||||
Introduction
|
||||
------------
|
||||
|
||||
TCG instructions operate on variables which are temporaries, local
|
||||
temporaries or globals. TCG instructions and variables are strongly
|
||||
typed. Two types are supported: 32 bit integers and 64 bit
|
||||
integers. Pointers are defined as an alias to 32 bit or 64 bit
|
||||
integers depending on the TCG target word size.
|
||||
|
||||
Each instruction has a fixed number of output variable operands, input
|
||||
variable operands and always constant operands.
|
||||
|
||||
The notable exception is the call instruction which has a variable
|
||||
number of outputs and inputs.
|
||||
|
||||
In the textual form, output operands usually come first, followed by
|
||||
input operands, followed by constant operands. The output type is
|
||||
included in the instruction name. Constants are prefixed with a '$'.
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
add_i32 t0, t1, t2 /* (t0 <- t1 + t2) */
|
||||
|
||||
|
||||
Assumptions
|
||||
-----------
|
||||
|
||||
Basic blocks
|
||||
^^^^^^^^^^^^
|
||||
|
||||
* Basic blocks end after branches (e.g. brcond_i32 instruction),
|
||||
goto_tb and exit_tb instructions.
|
||||
|
||||
* Basic blocks start after the end of a previous basic block, or at a
|
||||
set_label instruction.
|
||||
|
||||
After the end of a basic block, the content of temporaries is
|
||||
destroyed, but local temporaries and globals are preserved.
|
||||
|
||||
Floating point types
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
* Floating point types are not supported yet
|
||||
|
||||
Pointers
|
||||
^^^^^^^^
|
||||
|
||||
* Depending on the TCG target, pointer size is 32 bit or 64
|
||||
bit. The type ``TCG_TYPE_PTR`` is an alias to ``TCG_TYPE_I32`` or
|
||||
``TCG_TYPE_I64``.
|
||||
|
||||
Helpers
|
||||
^^^^^^^
|
||||
|
||||
* Using the tcg_gen_helper_x_y it is possible to call any function
|
||||
taking i32, i64 or pointer types. By default, before calling a helper,
|
||||
all globals are stored at their canonical location and it is assumed
|
||||
that the function can modify them. By default, the helper is allowed to
|
||||
modify the CPU state or raise an exception.
|
||||
|
||||
This can be overridden using the following function modifiers:
|
||||
|
||||
- ``TCG_CALL_NO_READ_GLOBALS`` means that the helper does not read globals,
|
||||
either directly or via an exception. They will not be saved to their
|
||||
canonical locations before calling the helper.
|
||||
|
||||
- ``TCG_CALL_NO_WRITE_GLOBALS`` means that the helper does not modify any globals.
|
||||
They will only be saved to their canonical location before calling helpers,
|
||||
but they won't be reloaded afterwards.
|
||||
|
||||
- ``TCG_CALL_NO_SIDE_EFFECTS`` means that the call to the function is removed if
|
||||
the return value is not used.
|
||||
|
||||
Note that ``TCG_CALL_NO_READ_GLOBALS`` implies ``TCG_CALL_NO_WRITE_GLOBALS``.
|
||||
|
||||
On some TCG targets (e.g. x86), several calling conventions are
|
||||
supported.
|
||||
|
||||
Branches
|
||||
^^^^^^^^
|
||||
|
||||
* Use the instruction 'br' to jump to a label.
|
||||
|
||||
Code Optimizations
|
||||
------------------
|
||||
|
||||
When generating instructions, you can count on at least the following
|
||||
optimizations:
|
||||
|
||||
- Single instructions are simplified, e.g.
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
and_i32 t0, t0, $0xffffffff
|
||||
|
||||
is suppressed.
|
||||
|
||||
- A liveness analysis is done at the basic block level. The
|
||||
information is used to suppress moves from a dead variable to
|
||||
another one. It is also used to remove instructions which compute
|
||||
dead results. The latter is especially useful for condition code
|
||||
optimization in QEMU.
|
||||
|
||||
In the following example:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
add_i32 t0, t1, t2
|
||||
add_i32 t0, t0, $1
|
||||
mov_i32 t0, $1
|
||||
|
||||
only the last instruction is kept.
|
||||
|
||||
|
||||
Instruction Reference
|
||||
=====================
|
||||
|
||||
Function call
|
||||
-------------
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - call *<ret>* *<params>* ptr
|
||||
|
||||
- | call function 'ptr' (pointer type)
|
||||
|
|
||||
| *<ret>* optional 32 bit or 64 bit return value
|
||||
| *<params>* optional 32 bit or 64 bit parameters
|
||||
|
||||
Jumps/Labels
|
||||
------------
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - set_label $label
|
||||
|
||||
- | Define label 'label' at the current program point.
|
||||
|
||||
* - br $label
|
||||
|
||||
- | Jump to label.
|
||||
|
||||
* - brcond_i32/i64 *t0*, *t1*, *cond*, *label*
|
||||
|
||||
- | Conditional jump if *t0* *cond* *t1* is true. *cond* can be:
|
||||
|
|
||||
| ``TCG_COND_EQ``
|
||||
| ``TCG_COND_NE``
|
||||
| ``TCG_COND_LT /* signed */``
|
||||
| ``TCG_COND_GE /* signed */``
|
||||
| ``TCG_COND_LE /* signed */``
|
||||
| ``TCG_COND_GT /* signed */``
|
||||
| ``TCG_COND_LTU /* unsigned */``
|
||||
| ``TCG_COND_GEU /* unsigned */``
|
||||
| ``TCG_COND_LEU /* unsigned */``
|
||||
| ``TCG_COND_GTU /* unsigned */``
|
||||
|
||||
Arithmetic
|
||||
----------
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - add_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* + *t2*
|
||||
|
||||
* - sub_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* - *t2*
|
||||
|
||||
* - neg_i32/i64 *t0*, *t1*
|
||||
|
||||
- | *t0* = -*t1* (two's complement)
|
||||
|
||||
* - mul_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* * *t2*
|
||||
|
||||
* - div_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* / *t2* (signed)
|
||||
| Undefined behavior if division by zero or overflow.
|
||||
|
||||
* - divu_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* / *t2* (unsigned)
|
||||
| Undefined behavior if division by zero.
|
||||
|
||||
* - rem_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* % *t2* (signed)
|
||||
| Undefined behavior if division by zero or overflow.
|
||||
|
||||
* - remu_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* % *t2* (unsigned)
|
||||
| Undefined behavior if division by zero.
|
||||
|
||||
|
||||
Logical
|
||||
-------
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - and_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* & *t2*
|
||||
|
||||
* - or_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* | *t2*
|
||||
|
||||
* - xor_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* ^ *t2*
|
||||
|
||||
* - not_i32/i64 *t0*, *t1*
|
||||
|
||||
- | *t0* = ~\ *t1*
|
||||
|
||||
* - andc_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* & ~\ *t2*
|
||||
|
||||
* - eqv_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = ~(*t1* ^ *t2*), or equivalently, *t0* = *t1* ^ ~\ *t2*
|
||||
|
||||
* - nand_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = ~(*t1* & *t2*)
|
||||
|
||||
* - nor_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = ~(*t1* | *t2*)
|
||||
|
||||
* - orc_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* | ~\ *t2*
|
||||
|
||||
* - clz_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* ? clz(*t1*) : *t2*
|
||||
|
||||
* - ctz_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* ? ctz(*t1*) : *t2*
|
||||
|
||||
* - ctpop_i32/i64 *t0*, *t1*
|
||||
|
||||
- | *t0* = number of bits set in *t1*
|
||||
|
|
||||
| With *ctpop* short for "count population", matching
|
||||
| the function name used in ``include/qemu/host-utils.h``.
|
||||
|
||||
|
||||
Shifts/Rotates
|
||||
--------------
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - shl_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* << *t2*
|
||||
| Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64)
|
||||
|
||||
* - shr_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* >> *t2* (unsigned)
|
||||
| Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64)
|
||||
|
||||
* - sar_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | *t0* = *t1* >> *t2* (signed)
|
||||
| Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64)
|
||||
|
||||
* - rotl_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | Rotation of *t2* bits to the left
|
||||
| Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64)
|
||||
|
||||
* - rotr_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | Rotation of *t2* bits to the right.
|
||||
| Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64)
|
||||
|
||||
|
||||
Misc
|
||||
----
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - mov_i32/i64 *t0*, *t1*
|
||||
|
||||
- | *t0* = *t1*
|
||||
| Move *t1* to *t0* (both operands must have the same type).
|
||||
|
||||
* - ext8s_i32/i64 *t0*, *t1*
|
||||
|
||||
ext8u_i32/i64 *t0*, *t1*
|
||||
|
||||
ext16s_i32/i64 *t0*, *t1*
|
||||
|
||||
ext16u_i32/i64 *t0*, *t1*
|
||||
|
||||
ext32s_i64 *t0*, *t1*
|
||||
|
||||
ext32u_i64 *t0*, *t1*
|
||||
|
||||
- | 8, 16 or 32 bit sign/zero extension (both operands must have the same type)
|
||||
|
||||
* - bswap16_i32/i64 *t0*, *t1*, *flags*
|
||||
|
||||
- | 16 bit byte swap on the low bits of a 32/64 bit input.
|
||||
|
|
||||
| If *flags* & ``TCG_BSWAP_IZ``, then *t1* is known to be zero-extended from bit 15.
|
||||
| If *flags* & ``TCG_BSWAP_OZ``, then *t0* will be zero-extended from bit 15.
|
||||
| If *flags* & ``TCG_BSWAP_OS``, then *t0* will be sign-extended from bit 15.
|
||||
|
|
||||
| If neither ``TCG_BSWAP_OZ`` nor ``TCG_BSWAP_OS`` are set, then the bits of *t0* above bit 15 may contain any value.
|
||||
|
||||
* - bswap32_i64 *t0*, *t1*, *flags*
|
||||
|
||||
- | 32 bit byte swap on a 64-bit value. The flags are the same as for bswap16,
|
||||
except they apply from bit 31 instead of bit 15.
|
||||
|
||||
* - bswap32_i32 *t0*, *t1*, *flags*
|
||||
|
||||
bswap64_i64 *t0*, *t1*, *flags*
|
||||
|
||||
- | 32/64 bit byte swap. The flags are ignored, but still present
|
||||
for consistency with the other bswap opcodes.
|
||||
|
||||
* - discard_i32/i64 *t0*
|
||||
|
||||
- | Indicate that the value of *t0* won't be used later. It is useful to
|
||||
force dead code elimination.
|
||||
|
||||
* - deposit_i32/i64 *dest*, *t1*, *t2*, *pos*, *len*
|
||||
|
||||
- | Deposit *t2* as a bitfield into *t1*, placing the result in *dest*.
|
||||
|
|
||||
| The bitfield is described by *pos*/*len*, which are immediate values:
|
||||
|
|
||||
| *len* - the length of the bitfield
|
||||
| *pos* - the position of the first bit, counting from the LSB
|
||||
|
|
||||
| For example, "deposit_i32 dest, t1, t2, 8, 4" indicates a 4-bit field
|
||||
at bit 8. This operation would be equivalent to
|
||||
|
|
||||
| *dest* = (*t1* & ~0x0f00) | ((*t2* << 8) & 0x0f00)
|
||||
|
||||
* - extract_i32/i64 *dest*, *t1*, *pos*, *len*
|
||||
|
||||
sextract_i32/i64 *dest*, *t1*, *pos*, *len*
|
||||
|
||||
- | Extract a bitfield from *t1*, placing the result in *dest*.
|
||||
|
|
||||
| The bitfield is described by *pos*/*len*, which are immediate values,
|
||||
as above for deposit. For extract_*, the result will be extended
|
||||
to the left with zeros; for sextract_*, the result will be extended
|
||||
to the left with copies of the bitfield sign bit at *pos* + *len* - 1.
|
||||
|
|
||||
| For example, "sextract_i32 dest, t1, 8, 4" indicates a 4-bit field
|
||||
at bit 8. This operation would be equivalent to
|
||||
|
|
||||
| *dest* = (*t1* << 20) >> 28
|
||||
|
|
||||
| (using an arithmetic right shift).
|
||||
|
||||
* - extract2_i32/i64 *dest*, *t1*, *t2*, *pos*
|
||||
|
||||
- | For N = {32,64}, extract an N-bit quantity from the concatenation
|
||||
of *t2*:*t1*, beginning at *pos*. The tcg_gen_extract2_{i32,i64} expander
|
||||
accepts 0 <= *pos* <= N as inputs. The backend code generator will
|
||||
not see either 0 or N as inputs for these opcodes.
|
||||
|
||||
* - extrl_i64_i32 *t0*, *t1*
|
||||
|
||||
- | For 64-bit hosts only, extract the low 32-bits of input *t1* and place it
|
||||
into 32-bit output *t0*. Depending on the host, this may be a simple move,
|
||||
or may require additional canonicalization.
|
||||
|
||||
* - extrh_i64_i32 *t0*, *t1*
|
||||
|
||||
- | For 64-bit hosts only, extract the high 32-bits of input *t1* and place it
|
||||
into 32-bit output *t0*. Depending on the host, this may be a simple shift,
|
||||
or may require additional canonicalization.
|
||||
|
||||
|
||||
Conditional moves
|
||||
-----------------
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - setcond_i32/i64 *dest*, *t1*, *t2*, *cond*
|
||||
|
||||
- | *dest* = (*t1* *cond* *t2*)
|
||||
|
|
||||
| Set *dest* to 1 if (*t1* *cond* *t2*) is true, otherwise set to 0.
|
||||
|
||||
* - movcond_i32/i64 *dest*, *c1*, *c2*, *v1*, *v2*, *cond*
|
||||
|
||||
- | *dest* = (*c1* *cond* *c2* ? *v1* : *v2*)
|
||||
|
|
||||
| Set *dest* to *v1* if (*c1* *cond* *c2*) is true, otherwise set to *v2*.
|
||||
|
||||
|
||||
Type conversions
|
||||
----------------
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - ext_i32_i64 *t0*, *t1*
|
||||
|
||||
- | Convert *t1* (32 bit) to *t0* (64 bit) with sign extension
|
||||
|
||||
* - extu_i32_i64 *t0*, *t1*
|
||||
|
||||
- | Convert *t1* (32 bit) to *t0* (64 bit) with zero extension
|
||||
|
||||
* - trunc_i64_i32 *t0*, *t1*
|
||||
|
||||
- | Truncate *t1* (64 bit) to *t0* (32 bit)
|
||||
|
||||
* - concat_i32_i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | Construct *t0* (64-bit) taking the low half from *t1* (32 bit) and the high half
|
||||
from *t2* (32 bit).
|
||||
|
||||
* - concat32_i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | Construct *t0* (64-bit) taking the low half from *t1* (64 bit) and the high half
|
||||
from *t2* (64 bit).
|
||||
|
||||
|
||||
Load/Store
|
||||
----------
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - ld_i32/i64 *t0*, *t1*, *offset*
|
||||
|
||||
ld8s_i32/i64 *t0*, *t1*, *offset*
|
||||
|
||||
ld8u_i32/i64 *t0*, *t1*, *offset*
|
||||
|
||||
ld16s_i32/i64 *t0*, *t1*, *offset*
|
||||
|
||||
ld16u_i32/i64 *t0*, *t1*, *offset*
|
||||
|
||||
ld32s_i64 t0, *t1*, *offset*
|
||||
|
||||
ld32u_i64 t0, *t1*, *offset*
|
||||
|
||||
- | *t0* = read(*t1* + *offset*)
|
||||
|
|
||||
| Load 8, 16, 32 or 64 bits with or without sign extension from host memory.
|
||||
*offset* must be a constant.
|
||||
|
||||
* - st_i32/i64 *t0*, *t1*, *offset*
|
||||
|
||||
st8_i32/i64 *t0*, *t1*, *offset*
|
||||
|
||||
st16_i32/i64 *t0*, *t1*, *offset*
|
||||
|
||||
st32_i64 *t0*, *t1*, *offset*
|
||||
|
||||
- | write(*t0*, *t1* + *offset*)
|
||||
|
|
||||
| Write 8, 16, 32 or 64 bits to host memory.
|
||||
|
||||
All these opcodes assume that the pointed host memory doesn't correspond
|
||||
to a global. In the latter case the behaviour is unpredictable.
|
||||
|
||||
|
||||
Multiword arithmetic support
|
||||
----------------------------
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - add2_i32/i64 *t0_low*, *t0_high*, *t1_low*, *t1_high*, *t2_low*, *t2_high*
|
||||
|
||||
sub2_i32/i64 *t0_low*, *t0_high*, *t1_low*, *t1_high*, *t2_low*, *t2_high*
|
||||
|
||||
- | Similar to add/sub, except that the double-word inputs *t1* and *t2* are
|
||||
formed from two single-word arguments, and the double-word output *t0*
|
||||
is returned in two single-word outputs.
|
||||
|
||||
* - mulu2_i32/i64 *t0_low*, *t0_high*, *t1*, *t2*
|
||||
|
||||
- | Similar to mul, except two unsigned inputs *t1* and *t2* yielding the full
|
||||
double-word product *t0*. The latter is returned in two single-word outputs.
|
||||
|
||||
* - muls2_i32/i64 *t0_low*, *t0_high*, *t1*, *t2*
|
||||
|
||||
- | Similar to mulu2, except the two inputs *t1* and *t2* are signed.
|
||||
|
||||
* - mulsh_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
muluh_i32/i64 *t0*, *t1*, *t2*
|
||||
|
||||
- | Provide the high part of a signed or unsigned multiply, respectively.
|
||||
|
|
||||
| If mulu2/muls2 are not provided by the backend, the tcg-op generator
|
||||
can obtain the same results by emitting a pair of opcodes, mul + muluh/mulsh.
|
||||
|
||||
|
||||
Memory Barrier support
|
||||
----------------------
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - mb *<$arg>*
|
||||
|
||||
- | Generate a target memory barrier instruction to ensure memory ordering
|
||||
as being enforced by a corresponding guest memory barrier instruction.
|
||||
|
|
||||
| The ordering enforced by the backend may be stricter than the ordering
|
||||
required by the guest. It cannot be weaker. This opcode takes a constant
|
||||
argument which is required to generate the appropriate barrier
|
||||
instruction. The backend should take care to emit the target barrier
|
||||
instruction only when necessary i.e., for SMP guests and when MTTCG is
|
||||
enabled.
|
||||
|
|
||||
| The guest translators should generate this opcode for all guest instructions
|
||||
which have ordering side effects.
|
||||
|
|
||||
| Please see :ref:`atomics-ref` for more information on memory barriers.
|
||||
|
||||
|
||||
64-bit guest on 32-bit host support
|
||||
-----------------------------------
|
||||
|
||||
The following opcodes are internal to TCG. Thus they are to be implemented by
|
||||
32-bit host code generators, but are not to be emitted by guest translators.
|
||||
They are emitted as needed by inline functions within ``tcg-op.h``.
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - brcond2_i32 *t0_low*, *t0_high*, *t1_low*, *t1_high*, *cond*, *label*
|
||||
|
||||
- | Similar to brcond, except that the 64-bit values *t0* and *t1*
|
||||
are formed from two 32-bit arguments.
|
||||
|
||||
* - setcond2_i32 *dest*, *t1_low*, *t1_high*, *t2_low*, *t2_high*, *cond*
|
||||
|
||||
- | Similar to setcond, except that the 64-bit values *t1* and *t2* are
|
||||
formed from two 32-bit arguments. The result is a 32-bit value.
|
||||
|
||||
|
||||
QEMU specific operations
|
||||
------------------------
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - exit_tb *t0*
|
||||
|
||||
- | Exit the current TB and return the value *t0* (word type).
|
||||
|
||||
* - goto_tb *index*
|
||||
|
||||
- | Exit the current TB and jump to the TB index *index* (constant) if the
|
||||
current TB was linked to this TB. Otherwise execute the next
|
||||
instructions. Only indices 0 and 1 are valid and tcg_gen_goto_tb may be issued
|
||||
at most once with each slot index per TB.
|
||||
|
||||
* - lookup_and_goto_ptr *tb_addr*
|
||||
|
||||
- | Look up a TB address *tb_addr* and jump to it if valid. If not valid,
|
||||
jump to the TCG epilogue to go back to the exec loop.
|
||||
|
|
||||
| This operation is optional. If the TCG backend does not implement the
|
||||
goto_ptr opcode, emitting this op is equivalent to emitting exit_tb(0).
|
||||
|
||||
* - qemu_ld_i32/i64 *t0*, *t1*, *flags*, *memidx*
|
||||
|
||||
qemu_st_i32/i64 *t0*, *t1*, *flags*, *memidx*
|
||||
|
||||
qemu_st8_i32 *t0*, *t1*, *flags*, *memidx*
|
||||
|
||||
- | Load data at the guest address *t1* into *t0*, or store data in *t0* at guest
|
||||
address *t1*. The _i32/_i64 size applies to the size of the input/output
|
||||
register *t0* only. The address *t1* is always sized according to the guest,
|
||||
and the width of the memory operation is controlled by *flags*.
|
||||
|
|
||||
| Both *t0* and *t1* may be split into little-endian ordered pairs of registers
|
||||
if dealing with 64-bit quantities on a 32-bit host.
|
||||
|
|
||||
| The *memidx* selects the qemu tlb index to use (e.g. user or kernel access).
|
||||
The flags are the MemOp bits, selecting the sign, width, and endianness
|
||||
of the memory access.
|
||||
|
|
||||
| For a 32-bit host, qemu_ld/st_i64 is guaranteed to only be used with a
|
||||
64-bit memory access specified in *flags*.
|
||||
|
|
||||
| For i386, qemu_st8_i32 is exactly like qemu_st_i32, except the size of
|
||||
the memory operation is known to be 8-bit. This allows the backend to
|
||||
provide a different set of register constraints.
|
||||
|
||||
|
||||
Host vector operations
|
||||
----------------------
|
||||
|
||||
All of the vector ops have two parameters, ``TCGOP_VECL`` & ``TCGOP_VECE``.
|
||||
The former specifies the length of the vector in log2 64-bit units; the
|
||||
latter specifies the length of the element (if applicable) in log2 8-bit units.
|
||||
E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32.
|
||||
|
||||
.. list-table::
|
||||
|
||||
* - mov_vec *v0*, *v1*
|
||||
ld_vec *v0*, *t1*
|
||||
st_vec *v0*, *t1*
|
||||
|
||||
- | Move, load and store.
|
||||
|
||||
* - dup_vec *v0*, *r1*
|
||||
|
||||
- | Duplicate the low N bits of *r1* into VECL/VECE copies across *v0*.
|
||||
|
||||
* - dupi_vec *v0*, *c*
|
||||
|
||||
- | Similarly, for a constant.
|
||||
| Smaller values will be replicated to host register size by the expanders.
|
||||
|
||||
* - dup2_vec *v0*, *r1*, *r2*
|
||||
|
||||
- | Duplicate *r2*:*r1* into VECL/64 copies across *v0*. This opcode is
|
||||
only present for 32-bit hosts.
|
||||
|
||||
* - add_vec *v0*, *v1*, *v2*
|
||||
|
||||
- | *v0* = *v1* + *v2*, in elements across the vector.
|
||||
|
||||
* - sub_vec *v0*, *v1*, *v2*
|
||||
|
||||
- | Similarly, *v0* = *v1* - *v2*.
|
||||
|
||||
* - mul_vec *v0*, *v1*, *v2*
|
||||
|
||||
- | Similarly, *v0* = *v1* * *v2*.
|
||||
|
||||
* - neg_vec *v0*, *v1*
|
||||
|
||||
- | Similarly, *v0* = -*v1*.
|
||||
|
||||
* - abs_vec *v0*, *v1*
|
||||
|
||||
- | Similarly, *v0* = *v1* < 0 ? -*v1* : *v1*, in elements across the vector.
|
||||
|
||||
* - smin_vec *v0*, *v1*, *v2*
|
||||
|
||||
umin_vec *v0*, *v1*, *v2*
|
||||
|
||||
- | Similarly, *v0* = MIN(*v1*, *v2*), for signed and unsigned element types.
|
||||
|
||||
* - smax_vec *v0*, *v1*, *v2*
|
||||
|
||||
umax_vec *v0*, *v1*, *v2*
|
||||
|
||||
- | Similarly, *v0* = MAX(*v1*, *v2*), for signed and unsigned element types.
|
||||
|
||||
* - ssadd_vec *v0*, *v1*, *v2*
|
||||
|
||||
sssub_vec *v0*, *v1*, *v2*
|
||||
|
||||
usadd_vec *v0*, *v1*, *v2*
|
||||
|
||||
ussub_vec *v0*, *v1*, *v2*
|
||||
|
||||
- | Signed and unsigned saturating addition and subtraction.
|
||||
|
|
||||
| If the true result is not representable within the element type, the
|
||||
element is set to the minimum or maximum value for the type.
|
||||
|
||||
* - and_vec *v0*, *v1*, *v2*
|
||||
|
||||
or_vec *v0*, *v1*, *v2*
|
||||
|
||||
xor_vec *v0*, *v1*, *v2*
|
||||
|
||||
andc_vec *v0*, *v1*, *v2*
|
||||
|
||||
orc_vec *v0*, *v1*, *v2*
|
||||
|
||||
not_vec *v0*, *v1*
|
||||
|
||||
- | Similarly, logical operations with and without complement.
|
||||
|
|
||||
| Note that VECE is unused.
|
||||
|
||||
* - shli_vec *v0*, *v1*, *i2*
|
||||
|
||||
shls_vec *v0*, *v1*, *s2*
|
||||
|
||||
- | Shift all elements from v1 by a scalar *i2*/*s2*. I.e.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
for (i = 0; i < VECL/VECE; ++i) {
|
||||
v0[i] = v1[i] << s2;
|
||||
}
|
||||
|
||||
* - shri_vec *v0*, *v1*, *i2*
|
||||
|
||||
sari_vec *v0*, *v1*, *i2*
|
||||
|
||||
rotli_vec *v0*, *v1*, *i2*
|
||||
|
||||
shrs_vec *v0*, *v1*, *s2*
|
||||
|
||||
sars_vec *v0*, *v1*, *s2*
|
||||
|
||||
- | Similarly for logical and arithmetic right shift, and left rotate.
|
||||
|
||||
* - shlv_vec *v0*, *v1*, *v2*
|
||||
|
||||
- | Shift elements from *v1* by elements from *v2*. I.e.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
for (i = 0; i < VECL/VECE; ++i) {
|
||||
v0[i] = v1[i] << v2[i];
|
||||
}
|
||||
|
||||
* - shrv_vec *v0*, *v1*, *v2*
|
||||
|
||||
sarv_vec *v0*, *v1*, *v2*
|
||||
|
||||
rotlv_vec *v0*, *v1*, *v2*
|
||||
|
||||
rotrv_vec *v0*, *v1*, *v2*
|
||||
|
||||
- | Similarly for logical and arithmetic right shift, and rotates.
|
||||
|
||||
* - cmp_vec *v0*, *v1*, *v2*, *cond*
|
||||
|
||||
- | Compare vectors by element, storing -1 for true and 0 for false.
|
||||
|
||||
* - bitsel_vec *v0*, *v1*, *v2*, *v3*
|
||||
|
||||
- | Bitwise select, *v0* = (*v2* & *v1*) | (*v3* & ~\ *v1*), across the entire vector.
|
||||
|
||||
* - cmpsel_vec *v0*, *c1*, *c2*, *v3*, *v4*, *cond*
|
||||
|
||||
- | Select elements based on comparison results:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
for (i = 0; i < n; ++i) {
|
||||
v0[i] = (c1[i] cond c2[i]) ? v3[i] : v4[i].
|
||||
}
|
||||
|
||||
**Note 1**: Some shortcuts are defined when the last operand is known to be
|
||||
a constant (e.g. addi for add, movi for mov).
|
||||
|
||||
**Note 2**: When using TCG, the opcodes must never be generated directly
|
||||
as some of them may not be available as "real" opcodes. Always use the
|
||||
function tcg_gen_xxx(args).
|
||||
|
||||
|
||||
Backend
|
||||
=======
|
||||
|
||||
``tcg-target.h`` contains the target specific definitions. ``tcg-target.c.inc``
|
||||
contains the target specific code; it is #included by ``tcg/tcg.c``, rather
|
||||
than being a standalone C file.
|
||||
|
||||
Assumptions
|
||||
-----------
|
||||
|
||||
The target word size (``TCG_TARGET_REG_BITS``) is expected to be 32 bit or
|
||||
64 bit. It is expected that the pointer has the same size as the word.
|
||||
|
||||
On a 32 bit target, all 64 bit operations are converted to 32 bits. A
|
||||
few specific operations must be implemented to allow it (see add2_i32,
|
||||
sub2_i32, brcond2_i32).
|
||||
|
||||
On a 64 bit target, the values are transferred between 32 and 64-bit
|
||||
registers using the following ops:
|
||||
|
||||
- trunc_shr_i64_i32
|
||||
- ext_i32_i64
|
||||
- extu_i32_i64
|
||||
|
||||
They ensure that the values are correctly truncated or extended when
|
||||
moved from a 32-bit to a 64-bit register or vice-versa. Note that the
|
||||
trunc_shr_i64_i32 is an optional op. It is not necessary to implement
|
||||
it if all the following conditions are met:
|
||||
|
||||
- 64-bit registers can hold 32-bit values
|
||||
- 32-bit values in a 64-bit register do not need to stay zero or
|
||||
sign extended
|
||||
- all 32-bit TCG ops ignore the high part of 64-bit registers
|
||||
|
||||
Floating point operations are not supported in this version. A
|
||||
previous incarnation of the code generator had full support of them,
|
||||
but it is better to concentrate on integer operations first.
|
||||
|
||||
Constraints
|
||||
----------------
|
||||
|
||||
GCC like constraints are used to define the constraints of every
|
||||
instruction. Memory constraints are not supported in this
|
||||
version. Aliases are specified in the input operands as for GCC.
|
||||
|
||||
The same register may be used for both an input and an output, even when
|
||||
they are not explicitly aliased. If an op expands to multiple target
|
||||
instructions then care must be taken to avoid clobbering input values.
|
||||
GCC style "early clobber" outputs are supported, with '``&``'.
|
||||
|
||||
A target can define specific register or constant constraints. If an
|
||||
operation uses a constant input constraint which does not allow all
|
||||
constants, it must also accept registers in order to have a fallback.
|
||||
The constraint '``i``' is defined generically to accept any constant.
|
||||
The constraint '``r``' is not defined generically, but is consistently
|
||||
used by each backend to indicate all registers.
|
||||
|
||||
The movi_i32 and movi_i64 operations must accept any constants.
|
||||
|
||||
The mov_i32 and mov_i64 operations must accept any registers of the
|
||||
same type.
|
||||
|
||||
The ld/st/sti instructions must accept signed 32 bit constant offsets.
|
||||
This can be implemented by reserving a specific register in which to
|
||||
compute the address if the offset is too big.
|
||||
|
||||
The ld/st instructions must accept any destination (ld) or source (st)
|
||||
register.
|
||||
|
||||
The sti instruction may fail if it cannot store the given constant.
|
||||
|
||||
Function call assumptions
|
||||
-------------------------
|
||||
|
||||
- The only supported types for parameters and return value are: 32 and
|
||||
64 bit integers and pointer.
|
||||
- The stack grows downwards.
|
||||
- The first N parameters are passed in registers.
|
||||
- The next parameters are passed on the stack by storing them as words.
|
||||
- Some registers are clobbered during the call.
|
||||
- The function can return 0 or 1 value in registers. On a 32 bit
|
||||
target, functions must be able to return 2 values in registers for
|
||||
64 bit return type.
|
||||
|
||||
|
||||
Recommended coding rules for best performance
|
||||
=============================================
|
||||
|
||||
- Use globals to represent the parts of the QEMU CPU state which are
|
||||
often modified, e.g. the integer registers and the condition
|
||||
codes. TCG will be able to use host registers to store them.
|
||||
|
||||
- Avoid globals stored in fixed registers. They must be used only to
|
||||
store the pointer to the CPU state and possibly to store a pointer
|
||||
to a register window.
|
||||
|
||||
- Use temporaries. Use local temporaries only when really needed,
|
||||
e.g. when you need to use a value after a jump. Local temporaries
|
||||
introduce a performance hit in the current TCG implementation: their
|
||||
content is saved to memory at end of each basic block.
|
||||
|
||||
- Free temporaries and local temporaries when they are no longer used
|
||||
(tcg_temp_free). Since tcg_const_x() also creates a temporary, you
|
||||
should free it after it is used. Freeing temporaries does not yield
|
||||
a better generated code, but it reduces the memory usage of TCG and
|
||||
the speed of the translation.
|
||||
|
||||
- Don't hesitate to use helpers for complicated or seldom used guest
|
||||
instructions. There is little performance advantage in using TCG to
|
||||
implement guest instructions taking more than about twenty TCG
|
||||
instructions. Note that this rule of thumb is more applicable to
|
||||
helpers doing complex logic or arithmetic, where the C compiler has
|
||||
scope to do a good job of optimisation; it is less relevant where
|
||||
the instruction is mostly doing loads and stores, and in those cases
|
||||
inline TCG may still be faster for longer sequences.
|
||||
|
||||
- The hard limit on the number of TCG instructions you can generate
|
||||
per guest instruction is set by ``MAX_OP_PER_INSTR`` in ``exec-all.h`` --
|
||||
you cannot exceed this without risking a buffer overrun.
|
||||
|
||||
- Use the 'discard' instruction if you know that TCG won't be able to
|
||||
prove that a given global is "dead" at a given program point. The
|
||||
x86 guest uses it to improve the condition codes optimisation.
|
@ -9,7 +9,7 @@ which make it relatively easily portable and simple while achieving good
|
||||
performances.
|
||||
|
||||
QEMU's dynamic translation backend is called TCG, for "Tiny Code
|
||||
Generator". For more information, please take a look at ``tcg/README``.
|
||||
Generator". For more information, please take a look at :ref:`tcg-ops-ref`.
|
||||
|
||||
The following sections outline some notable features and implementation
|
||||
details of QEMU's dynamic translator.
|
||||
|
@ -166,9 +166,9 @@ and user defined types.
|
||||
|
||||
Now, let's update our C implementation in monitor/qmp-cmds.c::
|
||||
|
||||
void qmp_hello_world(bool has_message, const char *message, Error **errp)
|
||||
void qmp_hello_world(const char *message, Error **errp)
|
||||
{
|
||||
if (has_message) {
|
||||
if (message) {
|
||||
printf("%s\n", message);
|
||||
} else {
|
||||
printf("Hello, world\n");
|
||||
@ -210,9 +210,9 @@ file. Basically, most errors are set by calling the error_setg() function.
|
||||
Let's say we don't accept the string "message" to contain the word "love". If
|
||||
it does contain it, we want the "hello-world" command to return an error::
|
||||
|
||||
void qmp_hello_world(bool has_message, const char *message, Error **errp)
|
||||
void qmp_hello_world(const char *message, Error **errp)
|
||||
{
|
||||
if (has_message) {
|
||||
if (message) {
|
||||
if (strstr(message, "love")) {
|
||||
error_setg(errp, "the word 'love' is not allowed");
|
||||
return;
|
||||
@ -467,9 +467,9 @@ There are a number of things to be noticed:
|
||||
allocated by the regular g_malloc0() function. Note that we chose to
|
||||
initialize the memory to zero. This is recommended for all QAPI types, as
|
||||
it helps avoiding bad surprises (specially with booleans)
|
||||
4. Remember that "next_deadline" is optional? All optional members have a
|
||||
'has_TYPE_NAME' member that should be properly set by the implementation,
|
||||
as shown above
|
||||
4. Remember that "next_deadline" is optional? Non-pointer optional
|
||||
members have a 'has_TYPE_NAME' member that should be properly set
|
||||
by the implementation, as shown above
|
||||
5. Even static strings, such as "alarm_timer->name", should be dynamically
|
||||
allocated by the implementation. This is so because the QAPI also generates
|
||||
a function to free its types and it cannot distinguish between dynamically
|
||||
|
@ -23,3 +23,4 @@ are useful for making QEMU interoperate with other software.
|
||||
vhost-user-gpu
|
||||
vhost-vdpa
|
||||
virtio-balloon-stats
|
||||
vnc-ledstate-pseudo-encoding
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user