Update to QEMU 9.0.0 (#67)

* Update to QEMU v9.0.0

---------

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Cédric Le Goater <clg@redhat.com>
Signed-off-by: Zheyu Ma <zheyuma97@gmail.com>
Signed-off-by: Ido Plat <ido.plat@ibm.com>
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
Signed-off-by: Gregory Price <gregory.price@memverge.com>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Lorenz Brun <lorenz@brun.one>
Signed-off-by: Yao Xingtao <yaoxt.fnst@fujitsu.com>
Signed-off-by: Arnaud Minier <arnaud.minier@telecom-paris.fr>
Signed-off-by: Inès Varhol <ines.varhol@telecom-paris.fr>
Signed-off-by: BALATON Zoltan <balaton@eik.bme.hu>
Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Sven Schnelle <svens@stackframe.org>
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Christian Schoenebeck <qemu_oss@crudebyte.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Helge Deller <deller@gmx.de>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Benjamin Gray <bgray@linux.ibm.com>
Signed-off-by: Avihai Horon <avihaih@nvidia.com>
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
Signed-off-by: Joonas Kankaala <joonas.a.kankaala@gmail.com>
Signed-off-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
Signed-off-by: Stefan Weil <sw@weilnetz.de>
Signed-off-by: Zhao Liu <zhao1.liu@intel.com>
Signed-off-by: Glenn Miles <milesg@linux.ibm.com>
Signed-off-by: Oleg Sviridov <oleg.sviridov@red-soft.ru>
Signed-off-by: Artem Chernyshev <artem.chernyshev@red-soft.ru>
Signed-off-by: Yajun Wu <yajunw@nvidia.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Signed-off-by: Pierre-Clément Tosi <ptosi@google.com>
Signed-off-by: Lei Wang <lei4.wang@intel.com>
Signed-off-by: Wei Wang <wei.w.wang@intel.com>
Signed-off-by: Martin Hundebøll <martin@geanix.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
Signed-off-by: Wafer <wafer@jaguarmicro.com>
Signed-off-by: Yuxue Liu <yuxue.liu@jaguarmicro.com>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Nguyen Dinh Phi <phind.uet@gmail.com>
Signed-off-by: Zack Buhman <zack@buhman.org>
Signed-off-by: Keith Packard <keithp@keithp.com>
Signed-off-by: Yuquan Wang <wangyuquan1236@phytium.com.cn>
Signed-off-by: Matheus Tavares Bernardino <quic_mathbern@quicinc.com>
Signed-off-by: Cindy Lu <lulu@redhat.com>
Co-authored-by: Peter Maydell <peter.maydell@linaro.org>
Co-authored-by: Fabiano Rosas <farosas@suse.de>
Co-authored-by: Peter Xu <peterx@redhat.com>
Co-authored-by: Thomas Huth <thuth@redhat.com>
Co-authored-by: Cédric Le Goater <clg@redhat.com>
Co-authored-by: Zheyu Ma <zheyuma97@gmail.com>
Co-authored-by: Ido Plat <ido.plat@ibm.com>
Co-authored-by: Ilya Leoshkevich <iii@linux.ibm.com>
Co-authored-by: Markus Armbruster <armbru@redhat.com>
Co-authored-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Co-authored-by: Paolo Bonzini <pbonzini@redhat.com>
Co-authored-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Co-authored-by: David Hildenbrand <david@redhat.com>
Co-authored-by: Kevin Wolf <kwolf@redhat.com>
Co-authored-by: Stefan Reiter <s.reiter@proxmox.com>
Co-authored-by: Fiona Ebner <f.ebner@proxmox.com>
Co-authored-by: Gregory Price <gregory.price@memverge.com>
Co-authored-by: Lorenz Brun <lorenz@brun.one>
Co-authored-by: Yao Xingtao <yaoxt.fnst@fujitsu.com>
Co-authored-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Co-authored-by: Arnaud Minier <arnaud.minier@telecom-paris.fr>
Co-authored-by: BALATON Zoltan <balaton@eik.bme.hu>
Co-authored-by: Igor Mammedov <imammedo@redhat.com>
Co-authored-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Co-authored-by: Richard Henderson <richard.henderson@linaro.org>
Co-authored-by: Sven Schnelle <svens@stackframe.org>
Co-authored-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Co-authored-by: Helge Deller <deller@kernel.org>
Co-authored-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
Co-authored-by: Benjamin Gray <bgray@linux.ibm.com>
Co-authored-by: Nicholas Piggin <npiggin@gmail.com>
Co-authored-by: Avihai Horon <avihaih@nvidia.com>
Co-authored-by: Michael Tokarev <mjt@tls.msk.ru>
Co-authored-by: Joonas Kankaala <joonas.a.kankaala@gmail.com>
Co-authored-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
Co-authored-by: Stefan Weil <sw@weilnetz.de>
Co-authored-by: Dayu Liu <liu.dayu@zte.com.cn>
Co-authored-by: Zhao Liu <zhao1.liu@intel.com>
Co-authored-by: Glenn Miles <milesg@linux.vnet.ibm.com>
Co-authored-by: Artem Chernyshev <artem.chernyshev@red-soft.ru>
Co-authored-by: Yajun Wu <yajunw@nvidia.com>
Co-authored-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Co-authored-by: Pierre-Clément Tosi <ptosi@google.com>
Co-authored-by: Wei Wang <wei.w.wang@intel.com>
Co-authored-by: Martin Hundebøll <martin@geanix.com>
Co-authored-by: Michael S. Tsirkin <mst@redhat.com>
Co-authored-by: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
Co-authored-by: Wafer <wafer@jaguarmicro.com>
Co-authored-by: lyx634449800 <yuxue.liu@jaguarmicro.com>
Co-authored-by: Gerd Hoffmann <kraxel@redhat.com>
Co-authored-by: Nguyen Dinh Phi <phind.uet@gmail.com>
Co-authored-by: Zack Buhman <zack@buhman.org>
Co-authored-by: Keith Packard <keithp@keithp.com>
Co-authored-by: Yuquan Wang <wangyuquan1236@phytium.com.cn>
Co-authored-by: Matheus Tavares Bernardino <quic_mathbern@quicinc.com>
Co-authored-by: Cindy Lu <lulu@redhat.com>
Romain Malmain committed on 2024-04-24 17:03:30 +02:00
commit 7c3c7877d8 (parent bf82921212)
2409 changed files with 75240 additions and 38772 deletions

@ -24,6 +24,10 @@ variables:
# Each script line from will be in a collapsible section in the job output
# and show the duration of each line.
FF_SCRIPT_SECTIONS: 1
# The project has a fairly fat GIT repo so we try and avoid bringing in things
# we don't need. The --filter options avoid blobs and tree references we aren't going to use
# and we also avoid fetching tags.
GIT_FETCH_EXTRA_FLAGS: --filter=blob:none --filter=tree:0 --no-tags --prune --quiet
interruptible: true
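
The two --filter options and --no-tags above are stock git partial-clone/fetch flags, so their effect is easy to check outside CI; a sketch (assumes a reasonably recent git, any large repository works):

    # blobs and trees are skipped up front and fetched lazily on checkout
    git clone --filter=blob:none --filter=tree:0 --no-tags \
        https://gitlab.com/qemu-project/qemu.git
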
@ -41,6 +45,10 @@ variables:
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_TAG'
when: never
# Scheduled runs on mainline don't get pipelines except for the special Coverity job
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"'
when: never
# Cirrus jobs can't run unless the creds / target repo are set
- if: '$QEMU_JOB_CIRRUS && ($CIRRUS_GITHUB_REPO == null || $CIRRUS_API_TOKEN == null)'
when: never

@ -14,6 +14,7 @@
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
- export CCACHE_MAXSIZE="500M"
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
- du -sh .git
- mkdir build
- cd build
- ccache --zero-stats

@ -41,7 +41,7 @@ build-system-ubuntu:
variables:
IMAGE: ubuntu2204
CONFIGURE_ARGS: --enable-docs
TARGETS: alpha-softmmu microblaze-softmmu mips64el-softmmu
TARGETS: alpha-softmmu microblazeel-softmmu mips64el-softmmu
MAKE_CHECK_ARGS: check-build
check-system-ubuntu:
@ -61,7 +61,7 @@ avocado-system-ubuntu:
variables:
IMAGE: ubuntu2204
MAKE_CHECK_ARGS: check-avocado
AVOCADO_TAGS: arch:alpha arch:microblaze arch:mips64el
AVOCADO_TAGS: arch:alpha arch:microblazeel arch:mips64el
build-system-debian:
extends:
@ -167,6 +167,75 @@ build-system-centos:
x86_64-softmmu rx-softmmu sh4-softmmu nios2-softmmu
MAKE_CHECK_ARGS: check-build
# Previous QEMU release. Used for cross-version migration tests.
build-previous-qemu:
extends: .native_build_job_template
artifacts:
when: on_success
expire_in: 2 days
paths:
- build-previous
exclude:
- build-previous/**/*.p
- build-previous/**/*.a.p
- build-previous/**/*.fa.p
- build-previous/**/*.c.o
- build-previous/**/*.c.o.d
- build-previous/**/*.fa
needs:
job: amd64-opensuse-leap-container
variables:
IMAGE: opensuse-leap
TARGETS: x86_64-softmmu aarch64-softmmu
# Override the default flags as we need more to grab the old version
GIT_FETCH_EXTRA_FLAGS: --prune --quiet
before_script:
- export QEMU_PREV_VERSION="$(sed 's/\([0-9.]*\)\.[0-9]*/v\1.0/' VERSION)"
- git remote add upstream https://gitlab.com/qemu-project/qemu
- git fetch upstream refs/tags/$QEMU_PREV_VERSION:refs/tags/$QEMU_PREV_VERSION
- git checkout $QEMU_PREV_VERSION
after_script:
- mv build build-previous
.migration-compat-common:
extends: .common_test_job_template
needs:
- job: build-previous-qemu
- job: build-system-opensuse
# The old QEMU could have bugs unrelated to migration that are
# already fixed in the current development branch, so this test
# might fail.
allow_failure: true
variables:
IMAGE: opensuse-leap
MAKE_CHECK_ARGS: check-build
script:
# Use the migration-tests from the older QEMU tree. This avoids
# testing an old QEMU against new features/tests that it is not
# compatible with.
- cd build-previous
# old to new
- QTEST_QEMU_BINARY_SRC=./qemu-system-${TARGET}
QTEST_QEMU_BINARY=../build/qemu-system-${TARGET} ./tests/qtest/migration-test
# new to old
- QTEST_QEMU_BINARY_DST=./qemu-system-${TARGET}
QTEST_QEMU_BINARY=../build/qemu-system-${TARGET} ./tests/qtest/migration-test
# This job needs to be disabled until we can have an aarch64 CPU model that
# will both (1) support both KVM and TCG, and (2) provide a stable ABI.
# Currently only "-cpu max" can provide (1), however it doesn't guarantee
# (2). Mark this test skipped until later.
migration-compat-aarch64:
extends: .migration-compat-common
variables:
TARGET: aarch64
QEMU_JOB_SKIPPED: 1
migration-compat-x86_64:
extends: .migration-compat-common
variables:
TARGET: x86_64
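
Outside CI, the compat script above boils down to pointing the qtest harness at two builds; a sketch for one target, assuming the build/build-previous layout used by these jobs:

    cd build-previous
    # old-to-new: the previous release is the migration source
    QTEST_QEMU_BINARY_SRC=./qemu-system-x86_64 \
        QTEST_QEMU_BINARY=../build/qemu-system-x86_64 ./tests/qtest/migration-test
    # new-to-old: the previous release is the migration destination
    QTEST_QEMU_BINARY_DST=./qemu-system-x86_64 \
        QTEST_QEMU_BINARY=../build/qemu-system-x86_64 ./tests/qtest/migration-test
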
check-system-centos:
extends: .native_test_job_template
needs:
@ -184,7 +253,7 @@ avocado-system-centos:
variables:
IMAGE: centos8
MAKE_CHECK_ARGS: check-avocado
AVOCADO_TAGS: arch:ppc64 arch:or1k arch:390x arch:x86_64 arch:rx
AVOCADO_TAGS: arch:ppc64 arch:or1k arch:s390x arch:x86_64 arch:rx
arch:sh4 arch:nios2
build-system-opensuse:
@ -592,7 +661,7 @@ build-without-defaults:
--disable-pie
--disable-qom-cast-debug
--disable-strip
TARGETS: avr-softmmu mips64-softmmu s390x-softmmu sh4-softmmu
TARGETS: avr-softmmu s390x-softmmu sh4-softmmu
sparc64-softmmu hexagon-linux-user i386-linux-user s390x-linux-user
MAKE_CHECK_ARGS: check
@ -662,3 +731,40 @@ pages:
- public
variables:
QEMU_JOB_PUBLISH: 1
coverity:
image: $CI_REGISTRY_IMAGE/qemu/fedora:$QEMU_CI_CONTAINER_TAG
stage: build
allow_failure: true
timeout: 3h
needs:
- job: amd64-fedora-container
optional: true
before_script:
- dnf install -y curl wget
script:
# would be nice to cancel the job if over quota (https://gitlab.com/gitlab-org/gitlab/-/issues/256089)
# for example:
# curl --request POST --header "PRIVATE-TOKEN: $CI_JOB_TOKEN" "${CI_SERVER_URL}/api/v4/projects/${CI_PROJECT_ID}/jobs/${CI_JOB_ID}/cancel"
- 'scripts/coverity-scan/run-coverity-scan --check-upload-only || { exitcode=$?; if test $exitcode = 1; then
exit 0;
else
exit $exitcode;
fi; };
scripts/coverity-scan/run-coverity-scan --update-tools-only > update-tools.log 2>&1 || { cat update-tools.log; exit 1; };
scripts/coverity-scan/run-coverity-scan --no-update-tools'
rules:
- if: '$COVERITY_TOKEN == null'
when: never
- if: '$COVERITY_EMAIL == null'
when: never
# Never included on upstream pipelines, except for schedules
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"'
when: on_success
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM'
when: never
# Forks don't get any pipeline unless QEMU_CI=1 or QEMU_CI=2 is set
- if: '$QEMU_CI != "1" && $QEMU_CI != "2"'
when: never
# Always manual on forks even if $QEMU_CI == "2"
- when: manual

@ -13,7 +13,7 @@
.cirrus_build_job:
extends: .base_job_template
stage: build
image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master
image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:latest
needs: []
# 20 mins larger than "timeout_in" in cirrus/build.yml
# as there's often a 5-10 minute delay before Cirrus CI
@ -52,7 +52,7 @@ x64-freebsd-13-build:
NAME: freebsd-13
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
CIRRUS_VM_IMAGE_SELECTOR: image_family
CIRRUS_VM_IMAGE_NAME: freebsd-13-2
CIRRUS_VM_IMAGE_NAME: freebsd-13-3
CIRRUS_VM_CPUS: 8
CIRRUS_VM_RAM: 8G
UPDATE_COMMAND: pkg update; pkg upgrade -y

@ -101,11 +101,6 @@ cris-fedora-cross-container:
variables:
NAME: fedora-cris-cross
win32-fedora-cross-container:
extends: .container_job_template
variables:
NAME: fedora-win32-cross
win64-fedora-cross-container:
extends: .container_job_template
variables:

@ -159,20 +159,6 @@ cross-mips64el-kvm-only:
IMAGE: debian-mips64el-cross
EXTRA_CONFIGURE_OPTS: --disable-tcg --target-list=mips64el-softmmu
cross-win32-system:
extends: .cross_system_build_job
needs:
job: win32-fedora-cross-container
variables:
IMAGE: fedora-win32-cross
EXTRA_CONFIGURE_OPTS: --enable-fdt=internal
CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu m68k-softmmu
microblazeel-softmmu mips64el-softmmu nios2-softmmu
artifacts:
when: on_success
paths:
- build/qemu-setup*.exe
cross-win64-system:
extends: .cross_system_build_job
needs:

@ -10,13 +10,14 @@
# gitlab-runner. To avoid problems that gitlab-runner can cause while
# reusing the GIT repository, let's enable the clone strategy, which
# guarantees a fresh repository on each job run.
variables:
GIT_STRATEGY: clone
# All custom runners can extend this template to upload the testlog
# data as an artifact and also feed the junit report
.custom_runner_template:
extends: .base_job_template
variables:
GIT_STRATEGY: clone
GIT_FETCH_EXTRA_FLAGS: --no-tags --prune --quiet
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
expire_in: 7 days

@ -24,6 +24,10 @@
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /opensbi/i'
when: manual
# Scheduled runs on mainline don't get pipelines except for the special Coverity job
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"'
when: never
# Run if any files affecting the build output are touched
- changes:
- .gitlab-ci.d/opensbi.yml

@ -1,4 +1,4 @@
.shared_msys2_builder:
msys2-64bit:
extends: .base_job_template
tags:
- shared-windows
@ -14,9 +14,22 @@
stage: build
timeout: 100m
variables:
# Select the "64 bit, gcc and MSVCRT" MSYS2 environment
MSYSTEM: MINGW64
# This feature doesn't (currently) work with PowerShell, it stops
# the echo'ing of commands being run and doesn't show any timing
FF_SCRIPT_SECTIONS: 0
# do not remove "--without-default-devices"!
# commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using --without-default-devices")
# changed to compile QEMU with the --without-default-devices switch
# for this job, because otherwise the build could not complete within
# the project timeout.
CONFIGURE_ARGS: --target-list=x86_64-softmmu --without-default-devices -Ddebug=false -Doptimization=0
# qTests don't run successfully with "--without-default-devices",
# so let's exclude the qtests from CI for now.
TEST_ARGS: --no-suite qtest
# The Windows git is a bit older so override the default
GIT_FETCH_EXTRA_FLAGS: --no-tags --prune --quiet
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
expire_in: 7 days
@ -72,33 +85,35 @@
- .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
bison diffutils flex
git grep make sed
$MINGW_TARGET-binutils
$MINGW_TARGET-capstone
$MINGW_TARGET-ccache
$MINGW_TARGET-curl
$MINGW_TARGET-cyrus-sasl
$MINGW_TARGET-dtc
$MINGW_TARGET-gcc
$MINGW_TARGET-glib2
$MINGW_TARGET-gnutls
$MINGW_TARGET-gtk3
$MINGW_TARGET-libgcrypt
$MINGW_TARGET-libjpeg-turbo
$MINGW_TARGET-libnfs
$MINGW_TARGET-libpng
$MINGW_TARGET-libssh
$MINGW_TARGET-libtasn1
$MINGW_TARGET-lzo2
$MINGW_TARGET-nettle
$MINGW_TARGET-ninja
$MINGW_TARGET-pixman
$MINGW_TARGET-pkgconf
$MINGW_TARGET-python
$MINGW_TARGET-SDL2
$MINGW_TARGET-SDL2_image
$MINGW_TARGET-snappy
$MINGW_TARGET-zstd
$EXTRA_PACKAGES "
mingw-w64-x86_64-binutils
mingw-w64-x86_64-capstone
mingw-w64-x86_64-ccache
mingw-w64-x86_64-curl
mingw-w64-x86_64-cyrus-sasl
mingw-w64-x86_64-dtc
mingw-w64-x86_64-gcc
mingw-w64-x86_64-glib2
mingw-w64-x86_64-gnutls
mingw-w64-x86_64-gtk3
mingw-w64-x86_64-libgcrypt
mingw-w64-x86_64-libjpeg-turbo
mingw-w64-x86_64-libnfs
mingw-w64-x86_64-libpng
mingw-w64-x86_64-libssh
mingw-w64-x86_64-libtasn1
mingw-w64-x86_64-libusb
mingw-w64-x86_64-lzo2
mingw-w64-x86_64-nettle
mingw-w64-x86_64-ninja
mingw-w64-x86_64-pixman
mingw-w64-x86_64-pkgconf
mingw-w64-x86_64-python
mingw-w64-x86_64-SDL2
mingw-w64-x86_64-SDL2_image
mingw-w64-x86_64-snappy
mingw-w64-x86_64-spice
mingw-w64-x86_64-usbredir
mingw-w64-x86_64-zstd"
- Write-Output "Running build at $(Get-Date -Format u)"
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
@ -115,19 +130,3 @@
- ..\msys64\usr\bin\bash -lc "make check MTESTARGS='$TEST_ARGS' || { cat meson-logs/testlog.txt; exit 1; } ;"
- ..\msys64\usr\bin\bash -lc "ccache --show-stats"
- Write-Output "Finished build at $(Get-Date -Format u)"
msys2-64bit:
extends: .shared_msys2_builder
variables:
MINGW_TARGET: mingw-w64-x86_64
MSYSTEM: MINGW64
# msys2 only ship these packages for 64-bit, not 32-bit
EXTRA_PACKAGES: $MINGW_TARGET-libusb $MINGW_TARGET-usbredir $MINGW_TARGET-spice
# do not remove "--without-default-devices"!
# commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using --without-default-devices"
# changed to compile QEMU with the --without-default-devices switch
# for the msys2 64-bit job, due to the build could not complete within
CONFIGURE_ARGS: --target-list=x86_64-softmmu --without-default-devices -Ddebug=false -Doptimization=0
# qTests don't run successfully with "--without-default-devices",
# so let's exclude the qtests from CI for now.
TEST_ARGS: --no-suite qtest

@ -36,6 +36,8 @@ Marek Dolata <mkdolata@us.ibm.com> mkdolata@us.ibm.com <mkdolata@us.ibm.com>
Michael Ellerman <mpe@ellerman.id.au> michael@ozlabs.org <michael@ozlabs.org>
Nick Hudson <hnick@vmware.com> hnick@vmware.com <hnick@vmware.com>
Timothée Cocault <timothee.cocault@gmail.com> timothee.cocault@gmail.com <timothee.cocault@gmail.com>
Stefan Weil <sw@weilnetz.de> <weil@mail.berlios.de>
Stefan Weil <sw@weilnetz.de> Stefan Weil <stefan@kiwi.(none)>
# There is also a:
# (no author) <(no author)@c046a42c-6fe2-441c-8c8c-71466251a162>
@ -60,6 +62,7 @@ Ian McKellar <ianloic@google.com> Ian McKellar via Qemu-devel <qemu-devel@nongnu
Julia Suvorova <jusual@mail.ru> Julia Suvorova via Qemu-devel <qemu-devel@nongnu.org>
Justin Terry (VM) <juterry@microsoft.com> Justin Terry (VM) via Qemu-devel <qemu-devel@nongnu.org>
Stefan Weil <sw@weilnetz.de> Stefan Weil via <qemu-devel@nongnu.org>
Stefan Weil <sw@weilnetz.de> Stefan Weil via <qemu-trivial@nongnu.org>
Andrey Drobyshev <andrey.drobyshev@virtuozzo.com> Andrey Drobyshev via <qemu-block@nongnu.org>
BALATON Zoltan <balaton@eik.bme.hu> BALATON Zoltan via <qemu-ppc@nongnu.org>
@ -81,6 +84,7 @@ Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com>
Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
Juan Quintela <quintela@trasno.org> <quintela@redhat.com>
Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org>
Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com>
Luc Michel <luc@lmichel.fr> <luc.michel@git.antfield.fr>
@ -97,6 +101,7 @@ Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@redhat.com>
Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@fungible.com>
Roman Bolshakov <rbolshakov@ddn.com> <r.bolshakov@yadro.com>
Stefan Brankovic <stefan.brankovic@syrmia.com> <stefan.brankovic@rt-rk.com.com>
Stefan Weil <sw@weilnetz.de> Stefan Weil <stefan@weilnetz.de>
Taylor Simpson <ltaylorsimpson@gmail.com> <tsimpson@quicinc.com>
Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com>
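
(For readers unfamiliar with .mailmap: each entry maps the identity on the right, as recorded in commits, onto the canonical identity on the left, which is how the new Stefan Weil lines fold his list-relay and historical addresses into sw@weilnetz.de. Format sketch, with placeholder names:

    Canonical Name <canonical@example.org> Recorded Name <recorded@example.org>

The recorded half may also be a bare <email> when the commit name already matches.)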

@ -35,7 +35,7 @@ env:
- TEST_BUILD_CMD=""
- TEST_CMD="make check V=1"
# This is broadly a list of "mainline" system targets which have support across the major distros
- MAIN_SOFTMMU_TARGETS="aarch64-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
- MAIN_SYSTEM_TARGETS="aarch64-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
- CCACHE_SLOPPINESS="include_file_ctime,include_file_mtime"
- CCACHE_MAXSIZE=1G
- G_MESSAGES_DEBUG=error
@ -114,7 +114,7 @@ jobs:
env:
- TEST_CMD="make check check-tcg V=1"
- CONFIG="--disable-containers --enable-fdt=system
--target-list=${MAIN_SOFTMMU_TARGETS} --cxx=/bin/false"
--target-list=${MAIN_SYSTEM_TARGETS} --cxx=/bin/false"
- UNRELIABLE=true
- name: "[ppc64] GCC check-tcg"
@ -184,8 +184,8 @@ jobs:
- genisoimage
env:
- TEST_CMD="make check check-tcg V=1"
- CONFIG="--disable-containers --enable-fdt=system
--target-list=${MAIN_SOFTMMU_TARGETS},s390x-linux-user"
- CONFIG="--disable-containers
--target-list=hppa-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
- UNRELIABLE=true
script:
- BUILD_RC=0 && make -j${JOBS} || BUILD_RC=$?
@ -220,13 +220,12 @@ jobs:
- libsnappy-dev
- libzstd-dev
- nettle-dev
- xfslibs-dev
- ninja-build
# Tests dependencies
- genisoimage
env:
- CONFIG="--disable-containers --enable-fdt=system --audio-drv-list=sdl
--disable-user --target-list-exclude=${MAIN_SOFTMMU_TARGETS}"
- CONFIG="--disable-containers --audio-drv-list=sdl --disable-user
--target-list=arm-softmmu,avr-softmmu,microblaze-softmmu,sh4eb-softmmu,sparc64-softmmu,xtensaeb-softmmu"
- name: "[s390x] GCC (user)"
arch: s390x
@ -240,6 +239,7 @@ jobs:
- flex
- bison
env:
- TEST_CMD="make check check-tcg V=1"
- CONFIG="--disable-containers --disable-system"
- name: "[s390x] Clang (disable-tcg)"

@ -70,7 +70,6 @@ R: Daniel P. Berrangé <berrange@redhat.com>
R: Thomas Huth <thuth@redhat.com>
R: Markus Armbruster <armbru@redhat.com>
R: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Juan Quintela <quintela@redhat.com>
W: https://www.qemu.org/docs/master/devel/index.html
S: Odd Fixes
F: docs/devel/style.rst
@ -317,7 +316,6 @@ F: tests/tcg/openrisc/
PowerPC TCG CPUs
M: Nicholas Piggin <npiggin@gmail.com>
M: Daniel Henrique Barboza <danielhb413@gmail.com>
R: Cédric Le Goater <clg@kaod.org>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: target/ppc/
@ -469,7 +467,6 @@ F: target/mips/sysemu/
PPC KVM CPUs
M: Nicholas Piggin <npiggin@gmail.com>
R: Daniel Henrique Barboza <danielhb413@gmail.com>
R: Cédric Le Goater <clg@kaod.org>
S: Odd Fixes
F: target/ppc/kvm.c
@ -643,6 +640,7 @@ R: Strahinja Jankovic <strahinja.p.jankovic@gmail.com>
L: qemu-arm@nongnu.org
S: Odd Fixes
F: hw/*/allwinner*
F: hw/ide/ahci-allwinner.c
F: include/hw/*/allwinner*
F: hw/arm/cubieboard.c
F: docs/system/arm/cubieboard.rst
@ -820,12 +818,13 @@ F: include/hw/misc/imx7_*.h
F: hw/pci-host/designware.c
F: include/hw/pci-host/designware.h
MPS2
MPS2 / MPS3
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/mps2.c
F: hw/arm/mps2-tz.c
F: hw/arm/mps3r.c
F: hw/misc/mps2-*.c
F: include/hw/misc/mps2-*.h
F: hw/arm/armsse.c
@ -1123,6 +1122,25 @@ L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/olimex-stm32-h405.c
STM32L4x5 SoC Family
M: Arnaud Minier <arnaud.minier@telecom-paris.fr>
M: Inès Varhol <ines.varhol@telecom-paris.fr>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/stm32l4x5_soc.c
F: hw/misc/stm32l4x5_exti.c
F: hw/misc/stm32l4x5_syscfg.c
F: hw/misc/stm32l4x5_rcc.c
F: hw/gpio/stm32l4x5_gpio.c
F: include/hw/*/stm32l4x5_*.h
B-L475E-IOT01A IoT Node
M: Arnaud Minier <arnaud.minier@telecom-paris.fr>
M: Inès Varhol <ines.varhol@telecom-paris.fr>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/b-l475e-iot01a.c
SmartFusion2
M: Subbaraya Sundeep <sundeep.lkml@gmail.com>
M: Peter Maydell <peter.maydell@linaro.org>
@ -1155,9 +1173,7 @@ R: Joel Stanley <joel@jms.id.au>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/*/*aspeed*
F: hw/misc/pca9552.c
F: include/hw/*/*aspeed*
F: include/hw/misc/pca9552*.h
F: hw/net/ftgmac100.c
F: include/hw/net/ftgmac100.h
F: docs/system/arm/aspeed.rst
@ -1343,6 +1359,7 @@ M: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Aurelien Jarno <aurelien@aurel32.net>
S: Odd Fixes
F: hw/isa/piix.c
F: hw/isa/fdc37m81x-superio.c
F: hw/acpi/piix4.c
F: hw/mips/malta.c
F: hw/pci-host/gt64120.c
@ -1407,6 +1424,7 @@ Bamboo
L: qemu-ppc@nongnu.org
S: Orphan
F: hw/ppc/ppc440_bamboo.c
F: hw/pci-host/ppc4xx_pci.c
F: tests/avocado/ppc_bamboo.py
e500
@ -1488,7 +1506,6 @@ F: tests/avocado/ppc_prep_40p.py
sPAPR (pseries)
M: Nicholas Piggin <npiggin@gmail.com>
R: Daniel Henrique Barboza <danielhb413@gmail.com>
R: Cédric Le Goater <clg@kaod.org>
R: David Gibson <david@gibson.dropbear.id.au>
R: Harsh Prateek Bora <harshpb@linux.ibm.com>
L: qemu-ppc@nongnu.org
@ -1509,6 +1526,7 @@ F: tests/qtest/libqos/*spapr*
F: tests/qtest/rtas*
F: tests/qtest/libqos/rtas*
F: tests/avocado/ppc_pseries.py
F: tests/avocado/ppc_hv_tests.py
PowerNV (Non-Virtualized)
M: Cédric Le Goater <clg@kaod.org>
@ -1526,6 +1544,14 @@ F: include/hw/pci-host/pnv*
F: pc-bios/skiboot.lid
F: tests/qtest/pnv*
pca955x
M: Glenn Miles <milesg@linux.ibm.com>
L: qemu-ppc@nongnu.org
L: qemu-arm@nongnu.org
S: Odd Fixes
F: hw/gpio/pca955*.c
F: include/hw/gpio/pca955*.h
virtex_ml507
M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
L: qemu-ppc@nongnu.org
@ -1539,13 +1565,14 @@ L: qemu-ppc@nongnu.org
S: Maintained
F: hw/ppc/sam460ex.c
F: hw/ppc/ppc440_uc.c
F: hw/ppc/ppc440_pcix.c
F: hw/pci-host/ppc440_pcix.c
F: hw/display/sm501*
F: hw/ide/sii3112.c
F: hw/rtc/m41t80.c
F: pc-bios/canyonlands.dt[sb]
F: pc-bios/u-boot-sam460ex-20100605.bin
F: roms/u-boot-sam460ex
F: docs/system/ppc/amigang.rst
pegasos2
M: BALATON Zoltan <balaton@eik.bme.hu>
@ -1694,7 +1721,7 @@ F: hw/rtc/sun4v-rtc.c
F: include/hw/rtc/sun4v-rtc.h
Leon3
M: Fabien Chouteau <chouteau@adacore.com>
M: Clément Chigot <chigot@adacore.com>
M: Frederic Konrad <konrad.frederic@yahoo.fr>
S: Maintained
F: hw/sparc/leon3.c
@ -1853,7 +1880,8 @@ M: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
R: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Yanan Wang <wangyanan55@huawei.com>
S: Supported
F: hw/core/cpu.c
F: hw/core/cpu-common.c
F: hw/core/cpu-sysemu.c
F: hw/core/machine-qmp-cmds.c
F: hw/core/machine.c
F: hw/core/machine-smp.c
@ -1919,7 +1947,6 @@ IDE
M: John Snow <jsnow@redhat.com>
L: qemu-block@nongnu.org
S: Odd Fixes
F: include/hw/ide.h
F: include/hw/ide/
F: hw/ide/
F: hw/block/block.c
@ -2053,6 +2080,7 @@ F: hw/ppc/ppc4xx*.c
F: hw/ppc/ppc440_uc.c
F: hw/ppc/ppc440.h
F: hw/i2c/ppc4xx_i2c.c
F: include/hw/pci-host/ppc4xx.h
F: include/hw/ppc/ppc4xx.h
F: include/hw/i2c/ppc4xx_i2c.h
F: hw/intc/ppc-uic.c
@ -2142,7 +2170,7 @@ S: Supported
F: hw/vfio/*
F: include/hw/vfio/
F: docs/igd-assign.txt
F: docs/devel/vfio-migration.rst
F: docs/devel/migration/vfio.rst
vfio-ccw
M: Eric Farman <farman@linux.ibm.com>
@ -2167,6 +2195,17 @@ F: hw/vfio/ap.c
F: docs/system/s390x/vfio-ap.rst
L: qemu-s390x@nongnu.org
iommufd
M: Yi Liu <yi.l.liu@intel.com>
M: Eric Auger <eric.auger@redhat.com>
M: Zhenzhong Duan <zhenzhong.duan@intel.com>
S: Supported
F: backends/iommufd.c
F: include/sysemu/iommufd.h
F: include/qemu/chardev_open.h
F: util/chardev_open.c
F: docs/devel/vfio-iommufd.rst
vhost
M: Michael S. Tsirkin <mst@redhat.com>
S: Supported
@ -2192,6 +2231,7 @@ F: qapi/virtio.json
F: net/vhost-user.c
F: include/hw/virtio/
F: docs/devel/virtio*
F: docs/devel/migration/virtio.rst
virtio-balloon
M: Michael S. Tsirkin <mst@redhat.com>
@ -2263,8 +2303,9 @@ L: virtio-fs@lists.linux.dev
virtio-input
M: Gerd Hoffmann <kraxel@redhat.com>
S: Odd Fixes
F: hw/input/vhost-user-input.c
F: docs/system/devices/vhost-user-input.rst
F: hw/input/virtio-input*.c
F: hw/virtio/vhost-user-input.c
F: include/hw/virtio/virtio-input.h
F: contrib/vhost-user-input/*
@ -2293,6 +2334,12 @@ F: include/sysemu/rng*.h
F: backends/rng*.c
F: tests/qtest/virtio-rng-test.c
vhost-user-stubs
M: Alex Bennée <alex.bennee@linaro.org>
S: Maintained
F: hw/virtio/vhost-user-base.c
F: hw/virtio/vhost-user-device*
vhost-user-rng
M: Mathieu Poirier <mathieu.poirier@linaro.org>
S: Supported
@ -2310,6 +2357,13 @@ F: hw/virtio/vhost-user-gpio*
F: include/hw/virtio/vhost-user-gpio.h
F: tests/qtest/libqos/virtio-gpio.*
vhost-user-snd
M: Alex Bennée <alex.bennee@linaro.org>
R: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
S: Maintained
F: hw/virtio/vhost-user-snd*
F: include/hw/virtio/vhost-user-snd.h
vhost-user-scmi
R: mzamazal@redhat.com
S: Supported
@ -2352,6 +2406,7 @@ F: docs/system/devices/virtio-snd.rst
nvme
M: Keith Busch <kbusch@kernel.org>
M: Klaus Jensen <its@irrelevant.dk>
R: Jesper Devantier <foss@defmacro.it>
L: qemu-block@nongnu.org
S: Supported
F: hw/nvme/*
@ -2388,8 +2443,13 @@ F: hw/net/net_tx_pkt*
Vmware
M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
S: Maintained
F: docs/specs/vmw_pvscsi-spec.txt
F: hw/display/vmware_vga.c
F: hw/net/vmxnet*
F: hw/scsi/vmw_pvscsi*
F: pc-bios/efi-vmxnet3.rom
F: pc-bios/vgabios-vmware.bin
F: roms/config.vga-vmware
F: tests/qtest/vmxnet3-test.c
F: docs/specs/vwm_pvscsi-spec.rst
@ -2443,6 +2503,12 @@ S: Maintained
F: hw/i2c/i2c_mux_pca954x.c
F: include/hw/i2c/i2c_mux_pca954x.h
pcf8574
M: Dmitrii Sharikhin <d.sharikhin@yadro.com>
S: Maintained
F: hw/gpio/pcf8574.c
F: include/gpio/pcf8574.h
Generic Loader
M: Alistair Francis <alistair@alistair23.me>
S: Maintained
@ -2525,7 +2591,7 @@ F: include/hw/virtio/virtio-gpu.h
F: docs/system/devices/virtio-gpu.rst
vhost-user-blk
M: Raphael Norwitz <raphael.norwitz@nutanix.com>
M: Raphael Norwitz <raphael@enfabrica.net>
S: Maintained
F: contrib/vhost-user-blk/
F: contrib/vhost-user-scsi/
@ -2863,6 +2929,7 @@ S: Supported
F: hw/cxl/
F: hw/mem/cxl_type3.c
F: include/hw/cxl/
F: qapi/cxl.json
Dirty Bitmaps
M: Eric Blake <eblake@redhat.com>
@ -2942,7 +3009,7 @@ F: include/qapi/error.h
F: include/qemu/error-report.h
F: qapi/error.json
F: util/error.c
F: util/qemu-error.c
F: util/error-report.c
F: scripts/coccinelle/err-bad-newline.cocci
F: scripts/coccinelle/error-use-after-free.cocci
F: scripts/coccinelle/error_propagate_null.cocci
@ -3289,6 +3356,7 @@ Stats
S: Orphan
F: include/sysemu/stats.h
F: stats/
F: qapi/stats.json
Streams
M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
@ -3339,10 +3407,8 @@ S: Odd Fixes
F: scripts/checkpatch.pl
Migration
M: Juan Quintela <quintela@redhat.com>
M: Peter Xu <peterx@redhat.com>
M: Fabiano Rosas <farosas@suse.de>
R: Leonardo Bras <leobras@redhat.com>
S: Maintained
F: hw/core/vmstate-if.c
F: include/hw/vmstate-if.h
@ -3352,17 +3418,15 @@ F: migration/
F: scripts/vmstate-static-checker.py
F: tests/vmstate-static-checker-data/
F: tests/qtest/migration-test.c
F: docs/devel/migration.rst
F: docs/devel/migration/
F: qapi/migration.json
F: tests/migration/
F: util/userfaultfd.c
X: migration/rdma*
RDMA Migration
M: Juan Quintela <quintela@redhat.com>
R: Li Zhijian <lizhijian@fujitsu.com>
R: Peter Xu <peterx@redhat.com>
R: Leonardo Bras <leobras@redhat.com>
S: Odd Fixes
F: migration/rdma*
@ -3374,6 +3438,12 @@ F: include/sysemu/dirtylimit.h
F: migration/dirtyrate.c
F: migration/dirtyrate.h
F: include/sysemu/dirtyrate.h
F: docs/devel/migration/dirty-limit.rst
Detached LUKS header
M: Hyman Huang <yong.huang@smartx.com>
S: Maintained
F: tests/qemu-iotests/tests/luks-detached-header
D-Bus
M: Marc-André Lureau <marcandre.lureau@redhat.com>
@ -3524,6 +3594,7 @@ F: util/iova-tree.c
elf2dmp
M: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
R: Akihiko Odaki <akihiko.odaki@daynix.com>
S: Maintained
F: contrib/elf2dmp/
@ -3558,6 +3629,15 @@ F: tests/qtest/adm1272-test.c
F: tests/qtest/max34451-test.c
F: tests/qtest/isl_pmbus_vr-test.c
FSI
M: Ninad Palsule <ninad@linux.ibm.com>
R: Cédric Le Goater <clg@kaod.org>
S: Maintained
F: hw/fsi/*
F: include/hw/fsi/*
F: docs/specs/fsi.rst
F: tests/qtest/aspeed_fsi-test.c
Firmware schema specifications
M: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Daniel P. Berrange <berrange@redhat.com>
@ -3580,7 +3660,6 @@ F: tests/uefi-test-tools/
VT-d Emulation
M: Michael S. Tsirkin <mst@redhat.com>
M: Peter Xu <peterx@redhat.com>
R: Jason Wang <jasowang@redhat.com>
S: Supported
F: hw/i386/intel_iommu.c
@ -3609,6 +3688,16 @@ F: hw/core/clock-vmstate.c
F: hw/core/qdev-clock.c
F: docs/devel/clocks.rst
Reset framework
M: Peter Maydell <peter.maydell@linaro.org>
S: Maintained
F: include/hw/resettable.h
F: include/hw/core/resetcontainer.h
F: include/sysemu/reset.h
F: hw/core/reset.c
F: hw/core/resettable.c
F: hw/core/resetcontainer.c
Usermode Emulation
------------------
Overall usermode emulation
@ -3649,6 +3738,7 @@ TCG Plugins
M: Alex Bennée <alex.bennee@linaro.org>
R: Alexandre Iooss <erdnaxe@crans.org>
R: Mahmoud Mandour <ma.mandourr@gmail.com>
R: Pierrick Bouvier <pierrick.bouvier@linaro.org>
S: Maintained
F: docs/devel/tcg-plugins.rst
F: plugins/
@ -4149,6 +4239,7 @@ F: docs/conf.py
F: docs/*/conf.py
F: docs/sphinx/
F: docs/_templates/
F: docs/devel/docs.rst
Miscellaneous
-------------

@ -141,8 +141,13 @@ MAKE.n = $(findstring n,$(firstword $(filter-out --%,$(MAKEFLAGS))))
MAKE.k = $(findstring k,$(firstword $(filter-out --%,$(MAKEFLAGS))))
MAKE.q = $(findstring q,$(firstword $(filter-out --%,$(MAKEFLAGS))))
MAKE.nq = $(if $(word 2, $(MAKE.n) $(MAKE.q)),nq)
NINJAFLAGS = $(if $V,-v) $(if $(MAKE.n), -n) $(if $(MAKE.k), -k0) \
$(filter-out -j, $(lastword -j1 $(filter -l% -j%, $(MAKEFLAGS)))) \
NINJAFLAGS = \
$(if $V,-v) \
$(if $(MAKE.n), -n) \
$(if $(MAKE.k), -k0) \
$(filter-out -j, \
$(or $(filter -l% -j%, $(MAKEFLAGS)), \
$(if $(filter --jobserver-auth=%, $(MAKEFLAGS)),, -j1))) \
-d keepdepfile
ninja-cmd-goals = $(or $(MAKECMDGOALS), all)
ninja-cmd-goals += $(foreach g, $(MAKECMDGOALS), $(.ninja-goals.$g))
@ -202,6 +207,7 @@ clean: recurse-clean
! -path ./roms/edk2/ArmPkg/Library/GccLto/liblto-arm.a \
-exec rm {} +
rm -f TAGS cscope.* *~ */*~
@$(MAKE) -Ctests/qemu-iotests clean
VERSION = $(shell cat $(SRC_PATH)/VERSION)
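
The NINJAFLAGS rework above changes how make's job control is forwarded: an explicit -j/-l from MAKEFLAGS is passed through, and -j1 is only forced when neither a job count nor a jobserver is advertised. A reading of the main cases (sketch, GNU make semantics assumed):

    make -j8   # MAKEFLAGS carries "-j8"              -> ninja ... -j8
    make       # no -j/-l, no --jobserver-auth        -> ninja ... -j1
    make -j    # bare "-j" is dropped by filter-out   -> ninja chooses its own parallelism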

@ -1 +1 @@
8.2.2
9.0.0

@ -16,3 +16,4 @@ config KVM
config XEN
bool
select FSDEV_9P if VIRTFS
select XEN_BUS

@ -41,7 +41,7 @@ void accel_blocker_init(void)
void accel_ioctl_begin(void)
{
if (likely(qemu_mutex_iothread_locked())) {
if (likely(bql_locked())) {
return;
}
@ -51,7 +51,7 @@ void accel_ioctl_begin(void)
void accel_ioctl_end(void)
{
if (likely(qemu_mutex_iothread_locked())) {
if (likely(bql_locked())) {
return;
}
@ -62,7 +62,7 @@ void accel_ioctl_end(void)
void accel_cpu_ioctl_begin(CPUState *cpu)
{
if (unlikely(qemu_mutex_iothread_locked())) {
if (unlikely(bql_locked())) {
return;
}
@ -72,7 +72,7 @@ void accel_cpu_ioctl_begin(CPUState *cpu)
void accel_cpu_ioctl_end(CPUState *cpu)
{
if (unlikely(qemu_mutex_iothread_locked())) {
if (unlikely(bql_locked())) {
return;
}
@ -105,7 +105,7 @@ void accel_ioctl_inhibit_begin(void)
* We allow to inhibit only when holding the BQL, so we can identify
* when an inhibitor wants to issue an ioctl easily.
*/
g_assert(qemu_mutex_iothread_locked());
g_assert(bql_locked());
/* Block further invocations of the ioctls outside the BQL. */
CPU_FOREACH(cpu) {
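
The substitutions in this file, and the matching lock/unlock renames in the files that follow, are the mechanical side of QEMU 9.0's rename of the Big QEMU Lock helpers; as a minimal before/after sketch:

    /* QEMU 8.2 and earlier */
    qemu_mutex_lock_iothread();
    g_assert(qemu_mutex_iothread_locked());
    qemu_mutex_unlock_iothread();

    /* QEMU 9.0 */
    bql_lock();
    g_assert(bql_locked());
    bql_unlock();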

@ -62,7 +62,7 @@ void accel_setup_post(MachineState *ms)
}
/* initialize the arch-independent accel operation interfaces */
void accel_init_ops_interfaces(AccelClass *ac)
void accel_system_init_ops_interfaces(AccelClass *ac)
{
const char *ac_name;
char *ops_name;

@ -10,6 +10,6 @@
#ifndef ACCEL_SYSTEM_H
#define ACCEL_SYSTEM_H
void accel_init_ops_interfaces(AccelClass *ac);
void accel_system_init_ops_interfaces(AccelClass *ac);
#endif /* ACCEL_SYSTEM_H */

@ -104,7 +104,7 @@ static void accel_init_cpu_interfaces(AccelClass *ac)
void accel_init_interfaces(AccelClass *ac)
{
#ifndef CONFIG_USER_ONLY
accel_init_ops_interfaces(ac);
accel_system_init_ops_interfaces(ac);
#endif /* !CONFIG_USER_ONLY */
accel_init_cpu_interfaces(ac);

@ -24,10 +24,9 @@ static void *dummy_cpu_thread_fn(void *arg)
rcu_register_thread();
qemu_mutex_lock_iothread();
bql_lock();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->neg.can_do_io = true;
current_cpu = cpu;
#ifndef _WIN32
@ -43,7 +42,7 @@ static void *dummy_cpu_thread_fn(void *arg)
qemu_guest_random_seed_thread_part2(cpu->random_seed);
do {
qemu_mutex_unlock_iothread();
bql_unlock();
#ifndef _WIN32
do {
int sig;
@ -56,11 +55,11 @@ static void *dummy_cpu_thread_fn(void *arg)
#else
qemu_sem_wait(&cpu->sem);
#endif
qemu_mutex_lock_iothread();
bql_lock();
qemu_wait_io_event(cpu);
} while (!cpu->unplug);
qemu_mutex_unlock_iothread();
bql_unlock();
rcu_unregister_thread();
return NULL;
}

@ -424,11 +424,10 @@ static void *hvf_cpu_thread_fn(void *arg)
rcu_register_thread();
qemu_mutex_lock_iothread();
bql_lock();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->neg.can_do_io = true;
current_cpu = cpu;
hvf_init_vcpu(cpu);
@ -449,7 +448,7 @@ static void *hvf_cpu_thread_fn(void *arg)
hvf_vcpu_destroy(cpu);
cpu_thread_signal_destroyed(cpu);
qemu_mutex_unlock_iothread();
bql_unlock();
rcu_unregister_thread();
return NULL;
}

@ -33,10 +33,9 @@ static void *kvm_vcpu_thread_fn(void *arg)
rcu_register_thread();
qemu_mutex_lock_iothread();
bql_lock();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->neg.can_do_io = true;
current_cpu = cpu;
r = kvm_init_vcpu(cpu, &error_fatal);
@ -58,7 +57,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
kvm_destroy_vcpu(cpu);
cpu_thread_signal_destroyed(cpu);
qemu_mutex_unlock_iothread();
bql_unlock();
rcu_unregister_thread();
return NULL;
}

@ -69,16 +69,6 @@
#define KVM_GUESTDBG_BLOCKIRQ 0
#endif
//#define DEBUG_KVM
#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
do { } while (0)
#endif
struct KVMParkedVcpu {
unsigned long vcpu_id;
int kvm_fd;
@ -98,7 +88,7 @@ bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_msi_use_devid;
bool kvm_has_guest_debug;
static bool kvm_has_guest_debug;
static int kvm_sstep_flags;
static bool kvm_immediate_exit;
static hwaddr kvm_max_slot_size = ~0;
@ -331,7 +321,7 @@ static int do_kvm_destroy_vcpu(CPUState *cpu)
struct KVMParkedVcpu *vcpu = NULL;
int ret = 0;
DPRINTF("kvm_destroy_vcpu\n");
trace_kvm_destroy_vcpu();
ret = kvm_arch_destroy_vcpu(cpu);
if (ret < 0) {
@ -341,7 +331,7 @@ static int do_kvm_destroy_vcpu(CPUState *cpu)
mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
if (mmap_size < 0) {
ret = mmap_size;
DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
trace_kvm_failed_get_vcpu_mmap_size();
goto err;
}
@ -443,7 +433,6 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp)
PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
if (cpu->kvm_dirty_gfns == MAP_FAILED) {
ret = -errno;
DPRINTF("mmap'ing vcpu dirty gfns failed: %d\n", ret);
goto err;
}
}
@ -817,7 +806,7 @@ static void kvm_dirty_ring_flush(void)
* should always be with BQL held, serialization is guaranteed.
* However, let's be sure of it.
*/
assert(qemu_mutex_iothread_locked());
assert(bql_locked());
/*
* First make sure to flush the hardware buffers by kicking all
* vcpus out in a synchronous way.
@ -1130,6 +1119,11 @@ int kvm_vm_check_extension(KVMState *s, unsigned int extension)
return ret;
}
/*
* We track the poisoned pages to be able to:
* - replace them on VM reset
* - block a migration for a VM with a poisoned page
*/
typedef struct HWPoisonPage {
ram_addr_t ram_addr;
QLIST_ENTRY(HWPoisonPage) list;
@ -1163,6 +1157,11 @@ void kvm_hwpoison_page_add(ram_addr_t ram_addr)
QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}
bool kvm_hwpoisoned_mem(void)
{
return !QLIST_EMPTY(&hwpoison_page_list);
}
static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
@ -1402,9 +1401,9 @@ static void *kvm_dirty_ring_reaper_thread(void *data)
trace_kvm_dirty_ring_reaper("wakeup");
r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
qemu_mutex_lock_iothread();
bql_lock();
kvm_dirty_ring_reap(s, NULL);
qemu_mutex_unlock_iothread();
bql_unlock();
r->reaper_iteration++;
}
@ -2000,12 +1999,17 @@ int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
return -EINVAL;
}
trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
vector, virq);
if (s->irq_routes->nr < s->gsi_count) {
trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
vector, virq);
kvm_add_routing_entry(s, &kroute);
kvm_arch_add_msi_route_post(&kroute, vector, dev);
c->changes++;
kvm_add_routing_entry(s, &kroute);
kvm_arch_add_msi_route_post(&kroute, vector, dev);
c->changes++;
} else {
kvm_irqchip_release_virq(s, virq);
return -ENOSPC;
}
return virq;
}
@ -2360,7 +2364,7 @@ static int kvm_init(MachineState *ms)
QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
QLIST_INIT(&s->kvm_parked_vcpus);
s->fd = qemu_open_old("/dev/kvm", O_RDWR);
s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR);
if (s->fd == -1) {
fprintf(stderr, "Could not access KVM kernel module: %m\n");
ret = -errno;
@ -2821,14 +2825,14 @@ int kvm_cpu_exec(CPUState *cpu)
struct kvm_run *run = cpu->kvm_run;
int ret, run_ret;
DPRINTF("kvm_cpu_exec()\n");
trace_kvm_cpu_exec();
if (kvm_arch_process_async_events(cpu)) {
qatomic_set(&cpu->exit_request, 0);
return EXCP_HLT;
}
qemu_mutex_unlock_iothread();
bql_unlock();
cpu_exec_start(cpu);
do {
@ -2848,7 +2852,7 @@ int kvm_cpu_exec(CPUState *cpu)
kvm_arch_pre_run(cpu, run);
if (qatomic_read(&cpu->exit_request)) {
DPRINTF("interrupt exit requested\n");
trace_kvm_interrupt_exit_request();
/*
* KVM requires us to reenter the kernel after IO exits to complete
* instruction emulation. This self-signal will ensure that we
@ -2868,17 +2872,17 @@ int kvm_cpu_exec(CPUState *cpu)
#ifdef KVM_HAVE_MCE_INJECTION
if (unlikely(have_sigbus_pending)) {
qemu_mutex_lock_iothread();
bql_lock();
kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
pending_sigbus_addr);
have_sigbus_pending = false;
qemu_mutex_unlock_iothread();
bql_unlock();
}
#endif
if (run_ret < 0) {
if (run_ret == -EINTR || run_ret == -EAGAIN) {
DPRINTF("io window exit\n");
trace_kvm_io_window_exit();
kvm_eat_signals(cpu);
ret = EXCP_INTERRUPT;
break;
@ -2900,7 +2904,6 @@ int kvm_cpu_exec(CPUState *cpu)
trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
switch (run->exit_reason) {
case KVM_EXIT_IO:
DPRINTF("handle_io\n");
/* Called outside BQL */
kvm_handle_io(run->io.port, attrs,
(uint8_t *)run + run->io.data_offset,
@ -2910,7 +2913,6 @@ int kvm_cpu_exec(CPUState *cpu)
ret = 0;
break;
case KVM_EXIT_MMIO:
DPRINTF("handle_mmio\n");
/* Called outside BQL */
address_space_rw(&address_space_memory,
run->mmio.phys_addr, attrs,
@ -2920,11 +2922,9 @@ int kvm_cpu_exec(CPUState *cpu)
ret = 0;
break;
case KVM_EXIT_IRQ_WINDOW_OPEN:
DPRINTF("irq_window_open\n");
ret = EXCP_INTERRUPT;
break;
case KVM_EXIT_SHUTDOWN:
DPRINTF("shutdown\n");
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
ret = EXCP_INTERRUPT;
break;
@ -2942,7 +2942,7 @@ int kvm_cpu_exec(CPUState *cpu)
* still full. Got kicked by KVM_RESET_DIRTY_RINGS.
*/
trace_kvm_dirty_ring_full(cpu->cpu_index);
qemu_mutex_lock_iothread();
bql_lock();
/*
* We throttle vCPU by making it sleep once it exit from kernel
* due to dirty ring full. In the dirtylimit scenario, reaping
@ -2954,11 +2954,12 @@ int kvm_cpu_exec(CPUState *cpu)
} else {
kvm_dirty_ring_reap(kvm_state, NULL);
}
qemu_mutex_unlock_iothread();
bql_unlock();
dirtylimit_vcpu_execute(cpu);
ret = 0;
break;
case KVM_EXIT_SYSTEM_EVENT:
trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type);
switch (run->system_event.type) {
case KVM_SYSTEM_EVENT_SHUTDOWN:
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
@ -2970,26 +2971,24 @@ int kvm_cpu_exec(CPUState *cpu)
break;
case KVM_SYSTEM_EVENT_CRASH:
kvm_cpu_synchronize_state(cpu);
qemu_mutex_lock_iothread();
bql_lock();
qemu_system_guest_panicked(cpu_get_crash_info(cpu));
qemu_mutex_unlock_iothread();
bql_unlock();
ret = 0;
break;
default:
DPRINTF("kvm_arch_handle_exit\n");
ret = kvm_arch_handle_exit(cpu, run);
break;
}
break;
default:
DPRINTF("kvm_arch_handle_exit\n");
ret = kvm_arch_handle_exit(cpu, run);
break;
}
} while (ret == 0);
cpu_exec_end(cpu);
qemu_mutex_lock_iothread();
bql_lock();
if (ret < 0) {
cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
@ -3601,6 +3600,24 @@ static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
s->kvm_dirty_ring_size = value;
}
static char *kvm_get_device(Object *obj,
Error **errp G_GNUC_UNUSED)
{
KVMState *s = KVM_STATE(obj);
return g_strdup(s->device);
}
static void kvm_set_device(Object *obj,
const char *value,
Error **errp G_GNUC_UNUSED)
{
KVMState *s = KVM_STATE(obj);
g_free(s->device);
s->device = g_strdup(value);
}
static void kvm_accel_instance_init(Object *obj)
{
KVMState *s = KVM_STATE(obj);
@ -3619,6 +3636,7 @@ static void kvm_accel_instance_init(Object *obj)
s->xen_version = 0;
s->xen_gnttab_max_frames = 64;
s->xen_evtchn_max_pirq = 256;
s->device = NULL;
}
/**
@ -3659,6 +3677,10 @@ static void kvm_accel_class_init(ObjectClass *oc, void *data)
object_class_property_set_description(oc, "dirty-ring-size",
"Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");
object_class_property_add_str(oc, "device", kvm_get_device, kvm_set_device);
object_class_property_set_description(oc, "device",
"Path to the device node to use (default: /dev/kvm)");
kvm_arch_accel_class_init(oc);
}
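
The "device" accelerator property registered above can be set like any other accel property on the command line; a usage sketch (the alternate node path is only an example):

    qemu-system-x86_64 -accel kvm,device=/dev/kvm-alt ...
    # left unset, it falls back to /dev/kvm as before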

@ -25,4 +25,9 @@ kvm_dirty_ring_reaper(const char *s) "%s"
kvm_dirty_ring_reap(uint64_t count, int64_t t) "reaped %"PRIu64" pages (took %"PRIi64" us)"
kvm_dirty_ring_reaper_kick(const char *reason) "%s"
kvm_dirty_ring_flush(int finished) "%d"
kvm_destroy_vcpu(void) ""
kvm_failed_get_vcpu_mmap_size(void) ""
kvm_cpu_exec(void) ""
kvm_interrupt_exit_request(void) ""
kvm_io_window_exit(void) ""
kvm_run_exit_system_event(int cpu_index, uint32_t event_type) "cpu_index %d, system_event_type %"PRIu32

@ -124,3 +124,8 @@ uint32_t kvm_dirty_ring_size(void)
{
return 0;
}
bool kvm_hwpoisoned_mem(void)
{
return false;
}

@ -30,9 +30,6 @@
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
@ -183,7 +180,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
if (tb->pc == desc->pc &&
if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
tb_page_addr0(tb) == desc->page_addr0 &&
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
@ -233,7 +230,7 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
return NULL;
}
desc.page_addr0 = phys_pc;
h = tb_hash_func(phys_pc, pc,
h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
flags, cs_base, cflags);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
@ -253,43 +250,29 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
hash = tb_jmp_cache_hash_func(pc);
jc = cpu->tb_jmp_cache;
if (cflags & CF_PCREL) {
/* Use acquire to ensure current load of pc from jc. */
tb = qatomic_load_acquire(&jc->array[hash].tb);
if (likely(tb &&
jc->array[hash].pc == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb_cflags(tb) == cflags)) {
return tb;
}
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return NULL;
}
jc->array[hash].pc = pc;
/* Ensure pc is written first. */
qatomic_store_release(&jc->array[hash].tb, tb);
} else {
/* Use rcu_read to ensure current load of pc from *tb. */
tb = qatomic_rcu_read(&jc->array[hash].tb);
if (likely(tb &&
tb->pc == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb_cflags(tb) == cflags)) {
return tb;
}
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return NULL;
}
/* Use the pc value already stored in tb->pc. */
qatomic_set(&jc->array[hash].tb, tb);
tb = qatomic_read(&jc->array[hash].tb);
if (likely(tb &&
jc->array[hash].pc == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb_cflags(tb) == cflags)) {
goto hit;
}
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return NULL;
}
jc->array[hash].pc = pc;
qatomic_set(&jc->array[hash].tb, tb);
hit:
/*
* As long as tb is not NULL, the contents are consistent. Therefore,
* the virtual PC has to match for non-CF_PCREL translations.
*/
assert((tb_cflags(tb) & CF_PCREL) || tb->pc == pc);
return tb;
}
@ -357,9 +340,9 @@ static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
#ifdef CONFIG_USER_ONLY
g_assert_not_reached();
#else
CPUClass *cc = CPU_GET_CLASS(cpu);
assert(cc->tcg_ops->debug_check_breakpoint);
match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
assert(tcg_ops->debug_check_breakpoint);
match_bp = tcg_ops->debug_check_breakpoint(cpu);
#endif
}
@ -413,6 +396,14 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
uint64_t cs_base;
uint32_t flags, cflags;
/*
* By definition we've just finished a TB, so I/O is OK.
* Avoid the possibility of calling cpu_io_recompile() if
* a page table walk triggered by tb_lookup() calling
* probe_access_internal() happens to touch an MMIO device.
* The next TB, if we chain to it, will clear the flag again.
*/
cpu->neg.can_do_io = true;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
cflags = curr_cflags(cpu);
@ -445,7 +436,6 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
CPUArchState *env = cpu_env(cpu);
uintptr_t ret;
TranslationBlock *last_tb;
const void *tb_ptr = itb->tc.ptr;
@ -455,7 +445,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
}
qemu_thread_jit_execute();
ret = tcg_qemu_tb_exec(env, tb_ptr);
ret = tcg_qemu_tb_exec(cpu_env(cpu), tb_ptr);
cpu->neg.can_do_io = true;
qemu_plugin_disable_mem_helpers(cpu);
/*
@ -476,10 +466,11 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
* counter hit zero); we must restore the guest PC to the address
* of the start of the TB.
*/
CPUClass *cc = CPU_GET_CLASS(cpu);
CPUClass *cc = cpu->cc;
const TCGCPUOps *tcg_ops = cc->tcg_ops;
if (cc->tcg_ops->synchronize_from_tb) {
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
if (tcg_ops->synchronize_from_tb) {
tcg_ops->synchronize_from_tb(cpu, last_tb);
} else {
tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
assert(cc->set_pc);
@ -511,19 +502,19 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
static void cpu_exec_enter(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
if (cc->tcg_ops->cpu_exec_enter) {
cc->tcg_ops->cpu_exec_enter(cpu);
if (tcg_ops->cpu_exec_enter) {
tcg_ops->cpu_exec_enter(cpu);
}
}
static void cpu_exec_exit(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
if (cc->tcg_ops->cpu_exec_exit) {
cc->tcg_ops->cpu_exec_exit(cpu);
if (tcg_ops->cpu_exec_exit) {
tcg_ops->cpu_exec_exit(cpu);
}
}
@ -558,8 +549,8 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
tcg_ctx->gen_tb = NULL;
}
#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
if (bql_locked()) {
bql_unlock();
}
assert_no_pages_locked();
}
@ -677,15 +668,11 @@ static inline bool cpu_handle_halt(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
if (cpu->halted) {
#if defined(TARGET_I386)
if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
X86CPU *x86_cpu = X86_CPU(cpu);
qemu_mutex_lock_iothread();
apic_poll_irq(x86_cpu->apic_state);
cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
qemu_mutex_unlock_iothread();
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
if (tcg_ops->cpu_exec_halt) {
tcg_ops->cpu_exec_halt(cpu);
}
#endif /* TARGET_I386 */
if (!cpu_has_work(cpu)) {
return true;
}
@ -699,7 +686,7 @@ static inline bool cpu_handle_halt(CPUState *cpu)
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
CPUWatchpoint *wp;
if (!cpu->watchpoint_hit) {
@ -708,8 +695,8 @@ static inline void cpu_handle_debug_exception(CPUState *cpu)
}
}
if (cc->tcg_ops->debug_excp_handler) {
cc->tcg_ops->debug_excp_handler(cpu);
if (tcg_ops->debug_excp_handler) {
tcg_ops->debug_excp_handler(cpu);
}
}
@ -744,6 +731,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
#endif
return false;
}
if (cpu->exception_index >= EXCP_INTERRUPT) {
/* exit request from the cpu execution loop */
*ret = cpu->exception_index;
@ -752,62 +740,59 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
}
cpu->exception_index = -1;
return true;
} else {
#if defined(CONFIG_USER_ONLY)
/* if user mode only, we simulate a fake exception
which will be handled outside the cpu execution
loop */
#if defined(TARGET_I386)
CPUClass *cc = CPU_GET_CLASS(cpu);
cc->tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
*ret = cpu->exception_index;
cpu->exception_index = -1;
return true;
#else
if (replay_exception()) {
CPUClass *cc = CPU_GET_CLASS(cpu);
qemu_mutex_lock_iothread();
cc->tcg_ops->do_interrupt(cpu);
qemu_mutex_unlock_iothread();
cpu->exception_index = -1;
}
if (unlikely(cpu->singlestep_enabled)) {
/*
* After processing the exception, ensure an EXCP_DEBUG is
* raised when single-stepping so that GDB doesn't miss the
* next instruction.
*/
*ret = EXCP_DEBUG;
cpu_handle_debug_exception(cpu);
return true;
}
} else if (!replay_has_interrupt()) {
/* give a chance to iothread in replay mode */
*ret = EXCP_INTERRUPT;
#if defined(CONFIG_USER_ONLY)
/*
* If user mode only, we simulate a fake exception which will be
* handled outside the cpu execution loop.
*/
#if defined(TARGET_I386)
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
*ret = cpu->exception_index;
cpu->exception_index = -1;
return true;
#else
if (replay_exception()) {
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
bql_lock();
tcg_ops->do_interrupt(cpu);
bql_unlock();
cpu->exception_index = -1;
if (unlikely(cpu->singlestep_enabled)) {
/*
* After processing the exception, ensure an EXCP_DEBUG is
* raised when single-stepping so that GDB doesn't miss the
* next instruction.
*/
*ret = EXCP_DEBUG;
cpu_handle_debug_exception(cpu);
return true;
}
#endif
} else if (!replay_has_interrupt()) {
/* give a chance to iothread in replay mode */
*ret = EXCP_INTERRUPT;
return true;
}
#endif
return false;
}
#ifndef CONFIG_USER_ONLY
/*
* CPU_INTERRUPT_POLL is a virtual event which gets converted into a
* "real" interrupt event later. It does not need to be recorded for
* replay purposes.
*/
static inline bool need_replay_interrupt(int interrupt_request)
static inline bool icount_exit_request(CPUState *cpu)
{
#if defined(TARGET_I386)
return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
return true;
#endif
if (!icount_enabled()) {
return false;
}
if (cpu->cflags_next_tb != -1 && !(cpu->cflags_next_tb & CF_USE_ICOUNT)) {
return false;
}
return cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0;
}
#endif /* !CONFIG_USER_ONLY */
static inline bool cpu_handle_interrupt(CPUState *cpu,
TranslationBlock **last_tb)
@ -830,7 +815,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
if (unlikely(qatomic_read(&cpu->interrupt_request))) {
int interrupt_request;
qemu_mutex_lock_iothread();
bql_lock();
interrupt_request = cpu->interrupt_request;
if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
/* Mask out external interrupts for this step. */
@ -839,7 +824,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
cpu->exception_index = EXCP_DEBUG;
qemu_mutex_unlock_iothread();
bql_unlock();
return true;
}
#if !defined(CONFIG_USER_ONLY)
@ -850,7 +835,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
cpu->halted = 1;
cpu->exception_index = EXCP_HLT;
qemu_mutex_unlock_iothread();
bql_unlock();
return true;
}
#if defined(TARGET_I386)
@ -861,14 +846,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
do_cpu_init(x86_cpu);
cpu->exception_index = EXCP_HALTED;
qemu_mutex_unlock_iothread();
bql_unlock();
return true;
}
#else
else if (interrupt_request & CPU_INTERRUPT_RESET) {
replay_interrupt();
cpu_reset(cpu);
qemu_mutex_unlock_iothread();
bql_unlock();
return true;
}
#endif /* !TARGET_I386 */
@ -877,11 +862,12 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
True when it is, and we should restart on a new TB,
and via longjmp via cpu_loop_exit. */
else {
CPUClass *cc = CPU_GET_CLASS(cpu);
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
if (cc->tcg_ops->cpu_exec_interrupt &&
cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
if (need_replay_interrupt(interrupt_request)) {
if (tcg_ops->cpu_exec_interrupt &&
tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
if (!tcg_ops->need_replay_interrupt ||
tcg_ops->need_replay_interrupt(interrupt_request)) {
replay_interrupt();
}
/*
@ -891,7 +877,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
*/
if (unlikely(cpu->singlestep_enabled)) {
cpu->exception_index = EXCP_DEBUG;
qemu_mutex_unlock_iothread();
bql_unlock();
return true;
}
cpu->exception_index = -1;
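The old TARGET_I386 special case is now behind the per-target need_replay_interrupt hook seen above. A minimal sketch of what the x86 side might look like, assuming the hook simply keeps the previous CPU_INTERRUPT_POLL behaviour (names hypothetical):

    /* hypothetical target hook; mirrors the removed #ifdef TARGET_I386 code */
    static bool x86_need_replay_interrupt(int interrupt_request)
    {
        /* CPU_INTERRUPT_POLL is virtual and must not be recorded for replay */
        return !(interrupt_request & CPU_INTERRUPT_POLL);
    }

    static const TCGCPUOps x86_tcg_ops = {
        /* ... other hooks ... */
        .need_replay_interrupt = x86_need_replay_interrupt,
    };

Targets that leave the hook NULL keep the old default of always recording the interrupt, per the NULL check above.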
@ -910,14 +896,11 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
}
/* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
qemu_mutex_unlock_iothread();
bql_unlock();
}
/* Finally, check if we need to exit to the main loop. */
if (unlikely(qatomic_read(&cpu->exit_request))
|| (icount_enabled()
&& (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
&& cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0)) {
if (unlikely(qatomic_read(&cpu->exit_request)) || icount_exit_request(cpu)) {
qatomic_set(&cpu->exit_request, 0);
if (cpu->exception_index == -1) {
cpu->exception_index = EXCP_INTERRUPT;
@ -1038,14 +1021,8 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
*/
h = tb_jmp_cache_hash_func(pc);
jc = cpu->tb_jmp_cache;
if (cflags & CF_PCREL) {
jc->array[h].pc = pc;
/* Ensure pc is written first. */
qatomic_store_release(&jc->array[h].tb, tb);
} else {
/* Use the pc value already stored in tb->pc. */
qatomic_set(&jc->array[h].tb, tb);
}
jc->array[h].pc = pc;
qatomic_set(&jc->array[h].tb, tb);
}
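With pc now stored unconditionally, the CF_PCREL store/release dance is gone. For reference, a simplified sketch of the matching lookup side under the new rule that a valid entry is only touched by its own vCPU (assumed shape, not the verbatim fast path):

    uint32_t h = tb_jmp_cache_hash_func(pc);
    TranslationBlock *tb = qatomic_read(&cpu->tb_jmp_cache->array[h].tb);
    if (tb != NULL && cpu->tb_jmp_cache->array[h].pc == pc) {
        /* cs_base, flags and cflags are still validated against CPU state */
        return tb;
    }
    return NULL;   /* miss: fall back to the qht lookup */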
#ifndef CONFIG_USER_ONLY
@ -1126,7 +1103,7 @@ int cpu_exec(CPUState *cpu)
return EXCP_HALTED;
}
rcu_read_lock();
RCU_READ_LOCK_GUARD();
cpu_exec_enter(cpu);
/*
@ -1140,18 +1117,15 @@ int cpu_exec(CPUState *cpu)
ret = cpu_exec_setjmp(cpu, &sc);
cpu_exec_exit(cpu);
rcu_read_unlock();
return ret;
}
bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
static bool tcg_target_initialized;
CPUClass *cc = CPU_GET_CLASS(cpu);
if (!tcg_target_initialized) {
cc->tcg_ops->initialize();
cpu->cc->tcg_ops->initialize();
tcg_target_initialized = true;
}


@ -1152,14 +1152,11 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
" prot=%x idx=%d\n",
addr, full->phys_addr, prot, mmu_idx);
read_flags = 0;
read_flags = full->tlb_fill_flags;
if (full->lg_page_size < TARGET_PAGE_BITS) {
/* Repeat the MMU check and TLB fill on every access. */
read_flags |= TLB_INVALID_MASK;
}
if (full->attrs.byte_swap) {
read_flags |= TLB_BSWAP;
}
is_ram = memory_region_is_ram(section->mr);
is_romd = memory_region_is_romd(section->mr);
@ -1463,9 +1460,8 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,
flags |= full->slow_flags[access_type];
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))
        || (access_type != MMU_INST_FETCH && force_mmio)) {
if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED))
|| (access_type != MMU_INST_FETCH && force_mmio)) {
*phost = NULL;
return TLB_MMIO;
}
@ -1615,7 +1611,7 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
void *p;
(void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH,
cpu_mmu_index(env, true), false,
cpu_mmu_index(env_cpu(env), true), false,
&p, &full, 0, false);
if (p == NULL) {
return -1;
@ -1872,6 +1868,31 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
tcg_debug_assert((flags & TLB_BSWAP) == 0);
}
/*
* This alignment check differs from the one above, in that this is
* based on the atomicity of the operation. The intended use case is
* the ARM memory type field of each PTE, where access to pages with
* Device memory type require alignment.
*/
if (unlikely(flags & TLB_CHECK_ALIGNED)) {
MemOp size = l->memop & MO_SIZE;
switch (l->memop & MO_ATOM_MASK) {
case MO_ATOM_NONE:
size = MO_8;
break;
case MO_ATOM_IFALIGN_PAIR:
case MO_ATOM_WITHIN16_PAIR:
size = size ? size - 1 : 0;
break;
default:
break;
}
if (addr & ((1 << size) - 1)) {
cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
}
}
return crosspage;
}
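A standalone check of the mask arithmetic above; the MemOp size values are assumed from tcg (MO_8..MO_64 encode log2 of the access size):

    #include <assert.h>

    enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3 };  /* log2 sizes */

    static int align_fault(unsigned long addr, int size_log2)
    {
        return (addr & ((1UL << size_log2) - 1)) != 0;
    }

    int main(void)
    {
        assert(align_fault(0x1002, MO_32));      /* 4-byte op, full atomicity */
        assert(!align_fault(0x1002, MO_32 - 1)); /* *_PAIR: half-size check */
        assert(!align_fault(0x1003, MO_8));      /* MO_ATOM_NONE: never faults */
        return 0;
    }

So a 4-byte Device-memory access with pair atomicity only requires 2-byte alignment, and MO_ATOM_NONE reduces the check to byte granularity.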
@ -2014,7 +2035,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
* @size: number of bytes
* @mmu_idx: virtual address context
* @ra: return address into tcg generated code, or 0
* Context: iothread lock held
* Context: BQL held
*
* Load @size bytes from @addr, which is memory-mapped i/o.
* The bytes are concatenated in big-endian order with @ret_be.
@ -2061,7 +2082,6 @@ static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
MemoryRegion *mr;
hwaddr mr_offset;
MemTxAttrs attrs;
uint64_t ret;
tcg_debug_assert(size > 0 && size <= 8);
@ -2069,12 +2089,9 @@ static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
qemu_mutex_lock_iothread();
ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
type, ra, mr, mr_offset);
qemu_mutex_unlock_iothread();
return ret;
BQL_LOCK_GUARD();
return int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
type, ra, mr, mr_offset);
}
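BQL_LOCK_GUARD() is the scoped replacement for the explicit lock/unlock pair it supersedes here: the direct return is safe because the guard drops the lock at scope exit. A hedged usage sketch (helper name hypothetical):

    static uint64_t read_locked(CPUState *cpu, vaddr addr)
    {
        BQL_LOCK_GUARD();                    /* bql_lock() now ...          */
        return do_one_mmio_read(cpu, addr);  /* hypothetical helper         */
    }                                        /* ... bql_unlock() on return  */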
static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
@ -2093,13 +2110,11 @@ static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
qemu_mutex_lock_iothread();
BQL_LOCK_GUARD();
a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
MMU_DATA_LOAD, ra, mr, mr_offset);
b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
qemu_mutex_unlock_iothread();
return int128_make128(b, a);
}
@ -2541,7 +2556,6 @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
MMU_DATA_LOAD, l.memop, ra);
ret = int128_make128(b, a);
}
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = bswap128(ret);
}
@ -2561,7 +2575,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
* @size: number of bytes
* @mmu_idx: virtual address context
* @ra: return address into tcg generated code, or 0
* Context: iothread lock held
* Context: BQL held
*
* Store @size bytes at @addr, which is memory-mapped i/o.
* The bytes to store are extracted in little-endian order from @val_le;
@ -2609,7 +2623,6 @@ static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
hwaddr mr_offset;
MemoryRegion *mr;
MemTxAttrs attrs;
uint64_t ret;
tcg_debug_assert(size > 0 && size <= 8);
@ -2617,12 +2630,9 @@ static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
qemu_mutex_lock_iothread();
ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
ra, mr, mr_offset);
qemu_mutex_unlock_iothread();
return ret;
BQL_LOCK_GUARD();
return int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
ra, mr, mr_offset);
}
static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
@ -2633,7 +2643,6 @@ static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
MemoryRegion *mr;
hwaddr mr_offset;
MemTxAttrs attrs;
uint64_t ret;
tcg_debug_assert(size > 8 && size <= 16);
@ -2641,14 +2650,11 @@ static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
qemu_mutex_lock_iothread();
BQL_LOCK_GUARD();
int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
mmu_idx, ra, mr, mr_offset);
ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
size - 8, mmu_idx, ra, mr, mr_offset + 8);
qemu_mutex_unlock_iothread();
return ret;
return int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
size - 8, mmu_idx, ra, mr, mr_offset + 8);
}
/*
@ -2999,26 +3005,30 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
return do_ld1_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
CPUState *cs = env_cpu(env);
MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true));
return do_ld1_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
}
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
return do_ld2_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
CPUState *cs = env_cpu(env);
MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true));
return do_ld2_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
}
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
return do_ld4_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
CPUState *cs = env_cpu(env);
MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true));
return do_ld4_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
}
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
return do_ld8_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
CPUState *cs = env_cpu(env);
MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true));
return do_ld8_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
}
uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,


@ -49,21 +49,19 @@ static bool icount_sleep = true;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
/*
* 0 = Do not count executed instructions.
* 1 = Fixed conversion of insn to ns via "shift" option
* 2 = Runtime adaptive algorithm to compute shift
*/
int use_icount;
/* Do not count executed instructions */
ICountMode use_icount = ICOUNT_DISABLED;
static void icount_enable_precise(void)
{
use_icount = 1;
/* Fixed conversion of insn to ns via "shift" option */
use_icount = ICOUNT_PRECISE;
}
static void icount_enable_adaptive(void)
{
use_icount = 2;
/* Runtime adaptive algorithm to compute shift */
use_icount = ICOUNT_ADAPTATIVE;
}
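The bare int becomes a typed mode. Judging by the names and the comments that moved into the setters, the enum presumably reads roughly like this (assumed declaration, not part of this hunk):

    typedef enum {
        ICOUNT_DISABLED = 0, /* do not count executed instructions */
        ICOUNT_PRECISE,      /* fixed insn-to-ns conversion via "shift" */
        ICOUNT_ADAPTATIVE,   /* runtime adaptive algorithm computes shift */
    } ICountMode;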
/*
@ -256,7 +254,7 @@ static void icount_warp_rt(void)
int64_t warp_delta;
warp_delta = clock - timers_state.vm_clock_warp_start;
if (icount_enabled() == 2) {
if (icount_enabled() == ICOUNT_ADAPTATIVE) {
/*
* In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too far
* ahead of real time (it might already be ahead so careful not
@ -419,7 +417,7 @@ void icount_account_warp_timer(void)
icount_warp_rt();
}
void icount_configure(QemuOpts *opts, Error **errp)
bool icount_configure(QemuOpts *opts, Error **errp)
{
const char *option = qemu_opt_get(opts, "shift");
bool sleep = qemu_opt_get_bool(opts, "sleep", true);
@ -429,27 +427,28 @@ void icount_configure(QemuOpts *opts, Error **errp)
if (!option) {
if (qemu_opt_get(opts, "align") != NULL) {
error_setg(errp, "Please specify shift option when using align");
return false;
}
return;
return true;
}
if (align && !sleep) {
error_setg(errp, "align=on and sleep=off are incompatible");
return;
return false;
}
if (strcmp(option, "auto") != 0) {
if (qemu_strtol(option, NULL, 0, &time_shift) < 0
|| time_shift < 0 || time_shift > MAX_ICOUNT_SHIFT) {
error_setg(errp, "icount: Invalid shift value");
return;
return false;
}
} else if (icount_align_option) {
error_setg(errp, "shift=auto and align=on are incompatible");
return;
return false;
} else if (!icount_sleep) {
error_setg(errp, "shift=auto and sleep=off are incompatible");
return;
return false;
}
icount_sleep = sleep;
@ -463,7 +462,7 @@ void icount_configure(QemuOpts *opts, Error **errp)
if (time_shift >= 0) {
timers_state.icount_time_shift = time_shift;
icount_enable_precise();
return;
return true;
}
icount_enable_adaptive();
@ -491,11 +490,14 @@ void icount_configure(QemuOpts *opts, Error **errp)
timer_mod(timers_state.icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
NANOSECONDS_PER_SECOND / 10);
return true;
}
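Returning bool lets callers branch on success instead of sniffing the Error pointer. A sketch of the expected call-site pattern (caller code assumed, not shown in this diff):

    Error *err = NULL;

    if (!icount_configure(opts, &err)) {
        error_report_err(err);
        exit(1);
    }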
void icount_notify_exit(void)
{
if (icount_enabled() && current_cpu) {
assert(icount_enabled());
if (current_cpu) {
qemu_cpu_kick(current_cpu);
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}


@ -76,7 +76,7 @@ static int required_atomicity(CPUState *cpu, uintptr_t p, MemOp memop)
/*
* Examine the alignment of p to determine if there are subobjects
* that must be aligned. Note that we only really need ctz4() --
* any more sigificant bits are discarded by the immediately
* any more significant bits are discarded by the immediately
* following comparison.
*/
tmp = ctz32(p);


@ -354,7 +354,8 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_ldub_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
int mmu_index = cpu_mmu_index(env_cpu(env), false);
return cpu_ldub_mmuidx_ra(env, addr, mmu_index, ra);
}
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
@ -364,7 +365,8 @@ int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_lduw_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
int mmu_index = cpu_mmu_index(env_cpu(env), false);
return cpu_lduw_be_mmuidx_ra(env, addr, mmu_index, ra);
}
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
@ -374,17 +376,20 @@ int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_ldl_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
int mmu_index = cpu_mmu_index(env_cpu(env), false);
return cpu_ldl_be_mmuidx_ra(env, addr, mmu_index, ra);
}
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_ldq_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
int mmu_index = cpu_mmu_index(env_cpu(env), false);
return cpu_ldq_be_mmuidx_ra(env, addr, mmu_index, ra);
}
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_lduw_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
int mmu_index = cpu_mmu_index(env_cpu(env), false);
return cpu_lduw_le_mmuidx_ra(env, addr, mmu_index, ra);
}
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
@ -394,54 +399,63 @@ int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_ldl_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
int mmu_index = cpu_mmu_index(env_cpu(env), false);
return cpu_ldl_le_mmuidx_ra(env, addr, mmu_index, ra);
}
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_ldq_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
int mmu_index = cpu_mmu_index(env_cpu(env), false);
return cpu_ldq_le_mmuidx_ra(env, addr, mmu_index, ra);
}
void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, uintptr_t ra)
{
cpu_stb_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
int mmu_index = cpu_mmu_index(env_cpu(env), false);
cpu_stb_mmuidx_ra(env, addr, val, mmu_index, ra);
}
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, uintptr_t ra)
{
cpu_stw_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
int mmu_index = cpu_mmu_index(env_cpu(env), false);
cpu_stw_be_mmuidx_ra(env, addr, val, mmu_index, ra);
}
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, uintptr_t ra)
{
cpu_stl_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
int mmu_index = cpu_mmu_index(env_cpu(env), false);
cpu_stl_be_mmuidx_ra(env, addr, val, mmu_index, ra);
}
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr,
uint64_t val, uintptr_t ra)
{
cpu_stq_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
int mmu_index = cpu_mmu_index(env_cpu(env), false);
cpu_stq_be_mmuidx_ra(env, addr, val, mmu_index, ra);
}
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, uintptr_t ra)
{
cpu_stw_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
int mmu_index = cpu_mmu_index(env_cpu(env), false);
cpu_stw_le_mmuidx_ra(env, addr, val, mmu_index, ra);
}
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, uintptr_t ra)
{
cpu_stl_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
int mmu_index = cpu_mmu_index(env_cpu(env), false);
cpu_stl_le_mmuidx_ra(env, addr, val, mmu_index, ra);
}
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr,
uint64_t val, uintptr_t ra)
{
cpu_stq_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
int mmu_index = cpu_mmu_index(env_cpu(env), false);
cpu_stq_le_mmuidx_ra(env, addr, val, mmu_index, ra);
}
/*--------------------------*/


@ -1,8 +1,8 @@
tcg_ss = ss.source_set()
common_ss.add(when: 'CONFIG_TCG', if_true: files(
'cpu-exec-common.c',
))
tcg_ss.add(files(
tcg_specific_ss = ss.source_set()
tcg_specific_ss.add(files(
'tcg-all.c',
'cpu-exec.c',
'tb-maint.c',
@ -11,17 +11,16 @@ tcg_ss.add(files(
'translate-all.c',
'translator.c',
))
tcg_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c'))
tcg_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_false: files('user-exec-stub.c'))
tcg_specific_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c'))
tcg_specific_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_false: files('user-exec-stub.c'))
if get_option('plugins')
tcg_ss.add(files('plugin-gen.c'))
tcg_specific_ss.add(files('plugin-gen.c'))
endif
tcg_ss.add(when: libdw, if_true: files('debuginfo.c'))
tcg_ss.add(when: 'CONFIG_LINUX', if_true: files('perf.c'))
specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_specific_ss)
specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
'cputlb.c',
'watchpoint.c',
))
system_ss.add(when: ['CONFIG_TCG'], if_true: files(


@ -43,6 +43,7 @@
* CPU's index into a TCG temp, since the first callback did it already.
*/
#include "qemu/osdep.h"
#include "qemu/plugin.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
@ -56,12 +57,6 @@
#include "exec/helper-info.c.inc"
#undef HELPER_H
#ifdef CONFIG_SOFTMMU
# define CONFIG_SOFTMMU_GATE 1
#else
# define CONFIG_SOFTMMU_GATE 0
#endif
/*
* plugin_cb_start TCG op args[]:
* 0: enum plugin_gen_from
@ -79,6 +74,7 @@ enum plugin_gen_from {
enum plugin_gen_cb {
PLUGIN_GEN_CB_UDATA,
PLUGIN_GEN_CB_UDATA_R,
PLUGIN_GEN_CB_INLINE,
PLUGIN_GEN_CB_MEM,
PLUGIN_GEN_ENABLE_MEM_HELPER,
@ -90,7 +86,10 @@ enum plugin_gen_cb {
* These helpers are stubs that get dynamically switched out for calls
* direct to the plugin if they are subscribed to.
*/
void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata)
void HELPER(plugin_vcpu_udata_cb_no_wg)(uint32_t cpu_index, void *udata)
{ }
void HELPER(plugin_vcpu_udata_cb_no_rwg)(uint32_t cpu_index, void *udata)
{ }
void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
@ -98,7 +97,7 @@ void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
void *userdata)
{ }
static void gen_empty_udata_cb(void)
static void gen_empty_udata_cb(void (*gen_helper)(TCGv_i32, TCGv_ptr))
{
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
TCGv_ptr udata = tcg_temp_ebb_new_ptr();
@ -106,28 +105,50 @@ static void gen_empty_udata_cb(void)
tcg_gen_movi_ptr(udata, 0);
tcg_gen_ld_i32(cpu_index, tcg_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);
gen_helper(cpu_index, udata);
tcg_temp_free_ptr(udata);
tcg_temp_free_i32(cpu_index);
}
static void gen_empty_udata_cb_no_wg(void)
{
gen_empty_udata_cb(gen_helper_plugin_vcpu_udata_cb_no_wg);
}
static void gen_empty_udata_cb_no_rwg(void)
{
gen_empty_udata_cb(gen_helper_plugin_vcpu_udata_cb_no_rwg);
}
/*
* For now we only support addi_i64.
* When we support more ops, we can generate one empty inline cb for each.
*/
static void gen_empty_inline_cb(void)
{
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
TCGv_ptr cpu_index_as_ptr = tcg_temp_ebb_new_ptr();
TCGv_i64 val = tcg_temp_ebb_new_i64();
TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
tcg_gen_ld_i32(cpu_index, tcg_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
/* second operand will be replaced by immediate value */
tcg_gen_mul_i32(cpu_index, cpu_index, cpu_index);
tcg_gen_ext_i32_ptr(cpu_index_as_ptr, cpu_index);
tcg_gen_movi_ptr(ptr, 0);
tcg_gen_add_ptr(ptr, ptr, cpu_index_as_ptr);
tcg_gen_ld_i64(val, ptr, 0);
/* pass an immediate != 0 so that it doesn't get optimized away */
tcg_gen_addi_i64(val, val, 0xdeadface);
/* second operand will be replaced by immediate value */
tcg_gen_add_i64(val, val, val);
tcg_gen_st_i64(val, ptr, 0);
tcg_temp_free_ptr(ptr);
tcg_temp_free_i64(val);
tcg_temp_free_ptr(cpu_index_as_ptr);
tcg_temp_free_i32(cpu_index);
}
static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
@ -192,7 +213,8 @@ static void plugin_gen_empty_callback(enum plugin_gen_from from)
gen_empty_mem_helper);
/* fall through */
case PLUGIN_GEN_FROM_TB:
gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb);
gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb_no_rwg);
gen_wrapped(from, PLUGIN_GEN_CB_UDATA_R, gen_empty_udata_cb_no_wg);
gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
break;
default:
@ -274,12 +296,37 @@ static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
return op;
}
static TCGOp *copy_ld_i32(TCGOp **begin_op, TCGOp *op)
{
return copy_op(begin_op, op, INDEX_op_ld_i32);
}
static TCGOp *copy_ext_i32_ptr(TCGOp **begin_op, TCGOp *op)
{
if (UINTPTR_MAX == UINT32_MAX) {
op = copy_op(begin_op, op, INDEX_op_mov_i32);
} else {
op = copy_op(begin_op, op, INDEX_op_ext_i32_i64);
}
return op;
}
static TCGOp *copy_add_ptr(TCGOp **begin_op, TCGOp *op)
{
if (UINTPTR_MAX == UINT32_MAX) {
op = copy_op(begin_op, op, INDEX_op_add_i32);
} else {
op = copy_op(begin_op, op, INDEX_op_add_i64);
}
return op;
}
static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{
if (TCG_TARGET_REG_BITS == 32) {
/* 2x ld_i32 */
op = copy_op(begin_op, op, INDEX_op_ld_i32);
op = copy_op(begin_op, op, INDEX_op_ld_i32);
op = copy_ld_i32(begin_op, op);
op = copy_ld_i32(begin_op, op);
} else {
/* ld_i64 */
op = copy_op(begin_op, op, INDEX_op_ld_i64);
@ -315,6 +362,13 @@ static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
return op;
}
static TCGOp *copy_mul_i32(TCGOp **begin_op, TCGOp *op, uint32_t v)
{
op = copy_op(begin_op, op, INDEX_op_mul_i32);
op->args[2] = tcgv_i32_arg(tcg_constant_i32(v));
return op;
}
static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
{
if (UINTPTR_MAX == UINT32_MAX) {
@ -380,18 +434,19 @@ static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
TCGOp *begin_op, TCGOp *op,
int *unused)
{
/* const_ptr */
op = copy_const_ptr(&begin_op, op, cb->userp);
char *ptr = cb->inline_insn.entry.score->data->data;
size_t elem_size = g_array_get_element_size(
cb->inline_insn.entry.score->data);
size_t offset = cb->inline_insn.entry.offset;
/* ld_i64 */
op = copy_ld_i32(&begin_op, op);
op = copy_mul_i32(&begin_op, op, elem_size);
op = copy_ext_i32_ptr(&begin_op, op);
op = copy_const_ptr(&begin_op, op, ptr + offset);
op = copy_add_ptr(&begin_op, op);
op = copy_ld_i64(&begin_op, op);
/* add_i64 */
op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);
/* st_i64 */
op = copy_st_i64(&begin_op, op);
return op;
}
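Put together, the injected op sequence is equivalent to the following C, evaluated per executing vCPU (sketch; cpu_index stands for the runtime vCPU index loaded by the first op):

    char *base = cb->inline_insn.entry.score->data->data;
    uint64_t *slot = (uint64_t *)(base + offset + (size_t)cpu_index * elem_size);
    *slot += cb->inline_insn.imm;

i.e. each vCPU bumps its own scoreboard slot, which is why the placeholder mul/ext/add ops exist in gen_empty_inline_cb().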
@ -588,6 +643,12 @@ static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
}
static void plugin_gen_tb_udata_r(const struct qemu_plugin_tb *ptb,
TCGOp *begin_op)
{
inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR_R], begin_op);
}
static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
TCGOp *begin_op)
{
@ -602,6 +663,14 @@ static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
}
static void plugin_gen_insn_udata_r(const struct qemu_plugin_tb *ptb,
TCGOp *begin_op, int insn_idx)
{
struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR_R], begin_op);
}
static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
TCGOp *begin_op, int insn_idx)
{
@ -721,6 +790,9 @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
case PLUGIN_GEN_CB_UDATA:
plugin_gen_tb_udata(plugin_tb, op);
break;
case PLUGIN_GEN_CB_UDATA_R:
plugin_gen_tb_udata_r(plugin_tb, op);
break;
case PLUGIN_GEN_CB_INLINE:
plugin_gen_tb_inline(plugin_tb, op);
break;
@ -737,6 +809,9 @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
case PLUGIN_GEN_CB_UDATA:
plugin_gen_insn_udata(plugin_tb, op, insn_idx);
break;
case PLUGIN_GEN_CB_UDATA_R:
plugin_gen_insn_udata_r(plugin_tb, op, insn_idx);
break;
case PLUGIN_GEN_CB_INLINE:
plugin_gen_insn_inline(plugin_tb, op, insn_idx);
break;
@ -796,7 +871,7 @@ bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
{
bool ret = false;
if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) {
if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_state->event_mask)) {
struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
int i;


@ -1,4 +1,5 @@
#ifdef CONFIG_PLUGIN
DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, ptr)
DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb_no_wg, TCG_CALL_NO_WG | TCG_CALL_PLUGIN, void, i32, ptr)
DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb_no_rwg, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, ptr)
DEF_HELPER_FLAGS_4(plugin_vcpu_mem_cb, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, i32, i64, ptr)
#endif


@ -13,9 +13,11 @@
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
/*
* Accessed in parallel; all accesses to 'tb' must be atomic.
* For CF_PCREL, accesses to 'pc' must be protected by a
* load_acquire/store_release to 'tb'.
* Invalidated in parallel; all accesses to 'tb' must be atomic.
* A valid entry is read/written by a single CPU, therefore there is
* no need for qatomic_rcu_read() and pc is always consistent with a
* non-NULL value of 'tb'. Strictly speaking pc is only needed for
* CF_PCREL, but it's used always for simplicity.
*/
struct CPUJumpCache {
struct rcu_head rcu;


@ -47,7 +47,7 @@ static bool tb_cmp(const void *ap, const void *bp)
const TranslationBlock *a = ap;
const TranslationBlock *b = bp;
return (a->pc == b->pc &&
return ((tb_cflags(a) & CF_PCREL || a->pc == b->pc) &&
a->cs_base == b->cs_base &&
a->flags == b->flags &&
(tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
@ -916,7 +916,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
/* remove the TB from the hash list */
phys_pc = tb_page_addr0(tb);
h = tb_hash_func(phys_pc, tb->pc,
h = tb_hash_func(phys_pc, (orig_cflags & CF_PCREL ? 0 : tb->pc),
tb->flags, tb->cs_base, orig_cflags);
if (!qht_remove(&tb_ctx.htable, tb, h)) {
return;
@ -983,7 +983,7 @@ TranslationBlock *tb_link_page(TranslationBlock *tb)
tb_record(tb);
/* add in the hash table */
h = tb_hash_func(tb_page_addr0(tb), tb->pc,
h = tb_hash_func(tb_page_addr0(tb), (tb->cflags & CF_PCREL ? 0 : tb->pc),
tb->flags, tb->cs_base, tb->cflags);
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
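Both hash sites and tb_cmp() now agree that CF_PCREL blocks hash with pc == 0 and skip the pc comparison. A hypothetical helper that captures the shared rule (not present in the diff):

    static uint32_t tb_hash_pc0(const TranslationBlock *tb, uint32_t cflags)
    {
        return tb_hash_func(tb_page_addr0(tb),
                            (cflags & CF_PCREL ? 0 : tb->pc),
                            tb->flags, tb->cs_base, cflags);
    }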
@ -1021,7 +1021,7 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
* Called with mmap_lock held for user-mode emulation
* NOTE: this function must not be called while a TB is running.
*/
void tb_invalidate_phys_page(tb_page_addr_t addr)
static void tb_invalidate_phys_page(tb_page_addr_t addr)
{
tb_page_addr_t start, last;
@ -1160,28 +1160,6 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
#endif
}
/*
* Invalidate all TBs which intersect with the target physical
* address page @addr.
*/
void tb_invalidate_phys_page(tb_page_addr_t addr)
{
struct page_collection *pages;
tb_page_addr_t start, last;
PageDesc *p;
p = page_find(addr >> TARGET_PAGE_BITS);
if (p == NULL) {
return;
}
start = addr & TARGET_PAGE_MASK;
last = addr | ~TARGET_PAGE_MASK;
pages = page_collection_lock(start, last);
tb_invalidate_phys_page_range__locked(pages, p, start, last, 0);
page_collection_unlock(pages);
}
/*
* Invalidate all TBs which intersect with the target physical address range
* [start;last]. NOTE: start and end may refer to *different* physical pages.


@ -123,12 +123,12 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
if (cpu->icount_budget == 0) {
/*
* We're called without the iothread lock, so must take it while
* We're called without the BQL, so must take it while
* we're calling timer handlers.
*/
qemu_mutex_lock_iothread();
bql_lock();
icount_notify_aio_contexts();
qemu_mutex_unlock_iothread();
bql_unlock();
}
}


@ -76,7 +76,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
rcu_add_force_rcu_notifier(&force_rcu.notifier);
tcg_register_thread();
qemu_mutex_lock_iothread();
bql_lock();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
@ -91,9 +91,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
do {
if (cpu_can_run(cpu)) {
int r;
qemu_mutex_unlock_iothread();
r = tcg_cpus_exec(cpu);
qemu_mutex_lock_iothread();
bql_unlock();
r = tcg_cpu_exec(cpu);
bql_lock();
switch (r) {
case EXCP_DEBUG:
cpu_handle_guest_debug(cpu);
@ -105,9 +105,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
*/
break;
case EXCP_ATOMIC:
qemu_mutex_unlock_iothread();
bql_unlock();
cpu_exec_step_atomic(cpu);
qemu_mutex_lock_iothread();
bql_lock();
default:
/* Ignore everything else? */
break;
@ -118,8 +118,8 @@ static void *mttcg_cpu_thread_fn(void *arg)
qemu_wait_io_event(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
tcg_cpus_destroy(cpu);
qemu_mutex_unlock_iothread();
tcg_cpu_destroy(cpu);
bql_unlock();
rcu_remove_force_rcu_notifier(&force_rcu.notifier);
rcu_unregister_thread();
return NULL;


@ -109,9 +109,9 @@ static void rr_wait_io_event(void)
{
CPUState *cpu;
while (all_cpu_threads_idle()) {
while (all_cpu_threads_idle() && replay_can_wait()) {
rr_stop_kick_timer();
qemu_cond_wait_iothread(first_cpu->halt_cond);
qemu_cond_wait_bql(first_cpu->halt_cond);
}
rr_start_kick_timer();
@ -131,7 +131,7 @@ static void rr_deal_with_unplugged_cpus(void)
CPU_FOREACH(cpu) {
if (cpu->unplug && !cpu_can_run(cpu)) {
tcg_cpus_destroy(cpu);
tcg_cpu_destroy(cpu);
break;
}
}
@ -188,7 +188,7 @@ static void *rr_cpu_thread_fn(void *arg)
rcu_add_force_rcu_notifier(&force_rcu);
tcg_register_thread();
qemu_mutex_lock_iothread();
bql_lock();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
@ -198,7 +198,7 @@ static void *rr_cpu_thread_fn(void *arg)
/* wait for initial kick-off after machine start */
while (first_cpu->stopped) {
qemu_cond_wait_iothread(first_cpu->halt_cond);
qemu_cond_wait_bql(first_cpu->halt_cond);
/* process any pending work */
CPU_FOREACH(cpu) {
@ -218,9 +218,9 @@ static void *rr_cpu_thread_fn(void *arg)
/* Only used for icount_enabled() */
int64_t cpu_budget = 0;
qemu_mutex_unlock_iothread();
bql_unlock();
replay_mutex_lock();
qemu_mutex_lock_iothread();
bql_lock();
if (icount_enabled()) {
int cpu_count = rr_cpu_count();
@ -254,23 +254,23 @@ static void *rr_cpu_thread_fn(void *arg)
if (cpu_can_run(cpu)) {
int r;
qemu_mutex_unlock_iothread();
bql_unlock();
if (icount_enabled()) {
icount_prepare_for_run(cpu, cpu_budget);
}
r = tcg_cpus_exec(cpu);
r = tcg_cpu_exec(cpu);
if (icount_enabled()) {
icount_process_data(cpu);
}
qemu_mutex_lock_iothread();
bql_lock();
if (r == EXCP_DEBUG) {
cpu_handle_guest_debug(cpu);
break;
} else if (r == EXCP_ATOMIC) {
qemu_mutex_unlock_iothread();
bql_unlock();
cpu_exec_step_atomic(cpu);
qemu_mutex_lock_iothread();
bql_lock();
break;
}
} else if (cpu->stop) {


@ -63,12 +63,12 @@ void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
cpu->tcg_cflags |= cflags;
}
void tcg_cpus_destroy(CPUState *cpu)
void tcg_cpu_destroy(CPUState *cpu)
{
cpu_thread_signal_destroyed(cpu);
}
int tcg_cpus_exec(CPUState *cpu)
int tcg_cpu_exec(CPUState *cpu)
{
int ret;
assert(tcg_enabled());
@ -88,7 +88,7 @@ static void tcg_cpu_reset_hold(CPUState *cpu)
/* mask must never be zero, except for A20 change call */
void tcg_handle_interrupt(CPUState *cpu, int mask)
{
g_assert(qemu_mutex_iothread_locked());
g_assert(bql_locked());
cpu->interrupt_request |= mask;


@ -14,8 +14,8 @@
#include "sysemu/cpus.h"
void tcg_cpus_destroy(CPUState *cpu);
int tcg_cpus_exec(CPUState *cpu);
void tcg_cpu_destroy(CPUState *cpu);
int tcg_cpu_exec(CPUState *cpu);
void tcg_handle_interrupt(CPUState *cpu, int mask);
void tcg_cpu_init_cflags(CPUState *cpu, bool parallel);


@ -63,7 +63,7 @@
#include "tb-context.h"
#include "internal-common.h"
#include "internal-target.h"
#include "perf.h"
#include "tcg/perf.h"
#include "tcg/insn-start-words.h"
//// --- Begin LibAFL code ---
@ -262,7 +262,6 @@ bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data)
void page_init(void)
{
page_size_init();
page_table_config_init();
}
@ -865,7 +864,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | n;
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
vaddr pc = log_pc(cpu, tb);
vaddr pc = cpu->cc->get_pc(cpu);
if (qemu_log_in_addr_range(pc)) {
qemu_log("cpu_io_recompile: rewound execution of TB to %016"
VADDR_PRIx "\n", pc);
@ -879,7 +878,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
void cpu_interrupt(CPUState *cpu, int mask)
{
g_assert(qemu_mutex_iothread_locked());
g_assert(bql_locked());
cpu->interrupt_request |= mask;
qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}


@ -18,20 +18,14 @@
static void set_can_do_io(DisasContextBase *db, bool val)
{
if (db->saved_can_do_io != val) {
db->saved_can_do_io = val;
QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1);
tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env,
offsetof(ArchCPU, parent_obj.neg.can_do_io) -
offsetof(ArchCPU, env));
}
QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1);
tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env,
offsetof(ArchCPU, parent_obj.neg.can_do_io) -
offsetof(ArchCPU, env));
}
bool translator_io_start(DisasContextBase *db)
{
set_can_do_io(db, true);
/*
* Ensure that this instruction will be the last in the TB.
* The target may override this to something more forceful.
@ -84,13 +78,6 @@ static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
- offsetof(ArchCPU, env));
}
/*
* cpu->neg.can_do_io is set automatically here at the beginning of
* each translation block. The cost is minimal, plus it would be
* very easy to forget doing it in the translator.
*/
set_can_do_io(db, db->max_insns == 1);
return icount_start_insn;
}
@ -140,6 +127,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
{
uint32_t cflags = tb_cflags(tb);
TCGOp *icount_start_insn;
TCGOp *first_insn_start = NULL;
bool plugin_enabled;
/* Initialize DisasContext */
@ -150,7 +138,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
db->num_insns = 0;
db->max_insns = *max_insns;
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
db->saved_can_do_io = -1;
db->insn_start = NULL;
db->host_addr[0] = host_pc;
db->host_addr[1] = NULL;
@ -168,6 +156,10 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
while (true) {
*max_insns = ++db->num_insns;
ops->insn_start(db, cpu);
db->insn_start = tcg_last_op();
if (first_insn_start == NULL) {
first_insn_start = db->insn_start;
}
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
if (plugin_enabled) {
@ -250,13 +242,11 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
* done next -- either exiting this loop or locate the start of
* the next instruction.
*/
if (db->num_insns == db->max_insns) {
/* Accept I/O on the last instruction. */
set_can_do_io(db, true);
}
ops->translate_insn(db, cpu);
//// --- Begin LibAFL code ---
post_translate_insn:
//// --- End LibAFL code ---
/*
* We can't instrument after instructions that change control
* flow although this only really affects post-load operations.
@ -287,6 +277,21 @@ post_translate_insn:
ops->tb_stop(db, cpu);
gen_tb_end(tb, cflags, icount_start_insn, db->num_insns);
/*
* Manage can_do_io for the translation block: set to false before
* the first insn and set to true before the last insn.
*/
if (db->num_insns == 1) {
tcg_debug_assert(first_insn_start == db->insn_start);
} else {
tcg_debug_assert(first_insn_start != db->insn_start);
tcg_ctx->emit_before_op = first_insn_start;
set_can_do_io(db, false);
}
tcg_ctx->emit_before_op = db->insn_start;
set_can_do_io(db, true);
tcg_ctx->emit_before_op = NULL;
if (plugin_enabled) {
plugin_gen_tb_end(cpu, db->num_insns);
}


@ -651,16 +651,17 @@ void page_protect(tb_page_addr_t address)
{
PageFlagsNode *p;
target_ulong start, last;
int host_page_size = qemu_real_host_page_size();
int prot;
assert_memory_lock();
if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
if (host_page_size <= TARGET_PAGE_SIZE) {
start = address & TARGET_PAGE_MASK;
last = start + TARGET_PAGE_SIZE - 1;
} else {
start = address & qemu_host_page_mask;
last = start + qemu_host_page_size - 1;
start = address & -host_page_size;
last = start + host_page_size - 1;
}
p = pageflags_find(start, last);
@ -671,7 +672,7 @@ void page_protect(tb_page_addr_t address)
if (unlikely(p->itree.last < last)) {
/* More than one protection region covers the one host page. */
assert(TARGET_PAGE_SIZE < qemu_host_page_size);
assert(TARGET_PAGE_SIZE < host_page_size);
while ((p = pageflags_next(p, start, last)) != NULL) {
prot |= p->flags;
}
@ -679,7 +680,7 @@ void page_protect(tb_page_addr_t address)
if (prot & PAGE_WRITE) {
pageflags_set_clear(start, last, 0, PAGE_WRITE);
mprotect(g2h_untagged(start), qemu_host_page_size,
mprotect(g2h_untagged(start), last - start + 1,
prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
}
}
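The address & -host_page_size idiom assumes a power-of-two page size; a standalone check of the arithmetic with an illustrative 16K host page:

    #include <assert.h>

    int main(void)
    {
        unsigned long host_page_size = 16384;
        unsigned long address = 0x12345678;
        unsigned long start = address & -host_page_size;
        unsigned long last  = start + host_page_size - 1;

        assert(start == 0x12344000UL);
        assert(last  == 0x12347fffUL);
        /* the new mprotect length is last - start + 1 */
        assert(last - start + 1 == host_page_size);
        return 0;
    }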
@ -725,18 +726,19 @@ int page_unprotect(target_ulong address, uintptr_t pc)
}
#endif
} else {
int host_page_size = qemu_real_host_page_size();
target_ulong start, len, i;
int prot;
if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
if (host_page_size <= TARGET_PAGE_SIZE) {
start = address & TARGET_PAGE_MASK;
len = TARGET_PAGE_SIZE;
prot = p->flags | PAGE_WRITE;
pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
} else {
start = address & qemu_host_page_mask;
len = qemu_host_page_size;
start = address & -host_page_size;
len = host_page_size;
prot = 0;
for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
@ -862,7 +864,7 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
typedef struct TargetPageDataNode {
struct rcu_head rcu;
IntervalTreeNode itree;
char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
char data[] __attribute__((aligned));
} TargetPageDataNode;
static IntervalTreeRoot targetdata_root;
@ -900,7 +902,8 @@ void page_reset_target_data(target_ulong start, target_ulong last)
n_last = MIN(last, n->last);
p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
memset(t->data + p_ofs * TARGET_PAGE_DATA_SIZE, 0,
p_len * TARGET_PAGE_DATA_SIZE);
}
}
@ -908,7 +911,7 @@ void *page_get_target_data(target_ulong address)
{
IntervalTreeNode *n;
TargetPageDataNode *t;
target_ulong page, region;
target_ulong page, region, p_ofs;
page = address & TARGET_PAGE_MASK;
region = address & TBD_MASK;
@ -924,7 +927,8 @@ void *page_get_target_data(target_ulong address)
mmap_lock();
n = interval_tree_iter_first(&targetdata_root, page, page);
if (!n) {
t = g_new0(TargetPageDataNode, 1);
t = g_malloc0(sizeof(TargetPageDataNode)
+ TPD_PAGES * TARGET_PAGE_DATA_SIZE);
n = &t->itree;
n->start = region;
n->last = region | ~TBD_MASK;
@ -934,7 +938,8 @@ void *page_get_target_data(target_ulong address)
}
t = container_of(n, TargetPageDataNode, itree);
return t->data[(page - region) >> TARGET_PAGE_BITS];
p_ofs = (page - region) >> TARGET_PAGE_BITS;
return t->data + p_ofs * TARGET_PAGE_DATA_SIZE;
}
#else
void page_reset_target_data(target_ulong start, target_ulong last) { }
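data[] is now a flexible array member sized at allocation time, hence the manual t->data + p_ofs * TARGET_PAGE_DATA_SIZE indexing. The same pattern, reduced to a self-contained sketch with stand-in constants:

    #include <glib.h>
    #include <string.h>

    #define PAGES     16   /* stand-in for TPD_PAGES */
    #define PAGE_SZ   64   /* stand-in for TARGET_PAGE_DATA_SIZE */

    typedef struct {
        unsigned long start, last;            /* stands in for the itree node */
        char data[] __attribute__((aligned));
    } Node;

    int main(void)
    {
        Node *t = g_malloc0(sizeof(Node) + PAGES * PAGE_SZ);
        size_t p_ofs = 3;
        char *page = t->data + p_ofs * PAGE_SZ;   /* old t->data[p_ofs] */
        memset(page, 0, PAGE_SZ);
        g_free(t);
        return 0;
    }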

accel/tcg/watchpoint.c (new file, 143 lines)

@ -0,0 +1,143 @@
/*
* CPU watchpoints
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
#include "exec/exec-all.h"
#include "exec/translate-all.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "hw/core/tcg-cpu-ops.h"
#include "hw/core/cpu.h"
/*
* Return true if this watchpoint address matches the specified
* access (ie the address range covered by the watchpoint overlaps
* partially or completely with the address range covered by the
* access).
*/
static inline bool watchpoint_address_matches(CPUWatchpoint *wp,
vaddr addr, vaddr len)
{
/*
* We know the lengths are non-zero, but a little caution is
* required to avoid errors in the case where the range ends
* exactly at the top of the address space and so addr + len
* wraps round to zero.
*/
vaddr wpend = wp->vaddr + wp->len - 1;
vaddr addrend = addr + len - 1;
return !(addr > wpend || wp->vaddr > addrend);
}
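The inclusive-end comparison sidesteps overflow when a range touches the top of the address space; a standalone check of the same predicate:

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t vaddr;

    static int overlaps(vaddr a, vaddr alen, vaddr b, vaddr blen)
    {
        vaddr aend = a + alen - 1;   /* inclusive end: no wrap for valid lens */
        vaddr bend = b + blen - 1;
        return !(a > bend || b > aend);
    }

    int main(void)
    {
        vaddr top = UINT64_MAX - 3;              /* watchpoint on last 4 bytes */
        assert(overlaps(top, 4, UINT64_MAX, 1)); /* access to the final byte  */
        assert(!overlaps(top, 4, top - 8, 4));   /* disjoint access below it  */
        return 0;
    }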
/* Return flags for watchpoints that match addr + prot. */
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
{
CPUWatchpoint *wp;
int ret = 0;
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
if (watchpoint_address_matches(wp, addr, len)) {
ret |= wp->flags;
}
}
return ret;
}
/* Generate a debug exception if a watchpoint has been hit. */
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs attrs, int flags, uintptr_t ra)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
CPUWatchpoint *wp;
assert(tcg_enabled());
if (cpu->watchpoint_hit) {
/*
* We re-entered the check after replacing the TB.
* Now raise the debug interrupt so that it will
* trigger after the current instruction.
*/
bql_lock();
cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
bql_unlock();
return;
}
if (cc->tcg_ops->adjust_watchpoint_address) {
/* this is currently used only by ARM BE32 */
addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len);
}
assert((flags & ~BP_MEM_ACCESS) == 0);
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
int hit_flags = wp->flags & flags;
if (hit_flags && watchpoint_address_matches(wp, addr, len)) {
if (replay_running_debug()) {
/*
* replay_breakpoint reads icount.
* Force recompile to succeed, because icount may
* be read only at the end of the block.
*/
if (!cpu->neg.can_do_io) {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
cpu_loop_exit_restore(cpu, ra);
}
/*
* Don't process the watchpoints when we are
* in a reverse debugging operation.
*/
replay_breakpoint();
return;
}
wp->flags |= hit_flags << BP_HIT_SHIFT;
wp->hitaddr = MAX(addr, wp->vaddr);
wp->hitattrs = attrs;
if (wp->flags & BP_CPU
&& cc->tcg_ops->debug_check_watchpoint
&& !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
wp->flags &= ~BP_WATCHPOINT_HIT;
continue;
}
cpu->watchpoint_hit = wp;
mmap_lock();
/* This call also restores vCPU state */
tb_check_watchpoint(cpu, ra);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
mmap_unlock();
cpu_loop_exit(cpu);
} else {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
mmap_unlock();
cpu_loop_exit_noexc(cpu);
}
} else {
wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
}


@ -15,6 +15,7 @@
#include "hw/xen/xen_native.h"
#include "hw/xen/xen-legacy-backend.h"
#include "hw/xen/xen_pt.h"
#include "hw/xen/xen_igd.h"
#include "chardev/char.h"
#include "qemu/accel.h"
#include "sysemu/cpus.h"


@ -1683,7 +1683,7 @@ static const VMStateDescription vmstate_audio = {
.version_id = 1,
.minimum_version_id = 1,
.needed = vmstate_audio_needed,
.fields = (VMStateField[]) {
.fields = (const VMStateField[]) {
VMSTATE_END_OF_LIST()
}
};


@ -299,7 +299,7 @@ COREAUDIO_WRAPPER_FUNC(write, size_t, (HWVoiceOut *hw, void *buf, size_t size),
#undef COREAUDIO_WRAPPER_FUNC
/*
* callback to feed audiooutput buffer. called without iothread lock.
* callback to feed audiooutput buffer. called without BQL.
* allowed to lock "buf_mutex", but disallowed to have any other locks.
*/
static OSStatus audioDeviceIOProc(
@ -538,7 +538,7 @@ static void update_device_playback_state(coreaudioVoiceOut *core)
}
}
/* called without iothread lock. */
/* called without BQL. */
static OSStatus handle_voice_change(
AudioObjectID in_object_id,
UInt32 in_number_addresses,
@ -547,7 +547,7 @@ static OSStatus handle_voice_change(
{
coreaudioVoiceOut *core = in_client_data;
qemu_mutex_lock_iothread();
bql_lock();
if (core->outputDeviceID) {
fini_out_device(core);
@ -557,7 +557,7 @@ static OSStatus handle_voice_change(
update_device_playback_state(core);
}
qemu_mutex_unlock_iothread();
bql_unlock();
return 0;
}


@ -11,7 +11,6 @@
#include "qemu/osdep.h"
#include "qemu/module.h"
#include "audio.h"
#include <errno.h>
#include "qemu/error-report.h"
#include "qapi/error.h"
#include <spa/param/audio/format-utils.h>


@ -1 +1,5 @@
source tpm/Kconfig
config IOMMUFD
bool
depends on VFIO


@ -427,7 +427,9 @@ static int cryptodev_builtin_close_session(
CRYPTODEV_BACKEND_BUILTIN(backend);
CryptoDevBackendBuiltinSession *session;
assert(session_id < MAX_NUM_SESSIONS && builtin->sessions[session_id]);
if (session_id >= MAX_NUM_SESSIONS || !builtin->sessions[session_id]) {
return -VIRTIO_CRYPTO_INVSESS;
}
session = builtin->sessions[session_id];
if (session->cipher) {


@ -393,7 +393,7 @@ static const VMStateDescription dbus_vmstate = {
.version_id = 0,
.pre_save = dbus_vmstate_pre_save,
.post_load = dbus_vmstate_post_load,
.fields = (VMStateField[]) {
.fields = (const VMStateField[]) {
VMSTATE_UINT32(data_size, DBusVMState),
VMSTATE_VBUFFER_ALLOC_UINT32(data, DBusVMState, 0, 0, data_size),
VMSTATE_END_OF_LIST()


@ -17,31 +17,29 @@
#include "sysemu/hostmem.h"
#include "hw/i386/hostmem-epc.h"
static void
static bool
sgx_epc_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
{
g_autofree char *name = NULL;
uint32_t ram_flags;
char *name;
int fd;
if (!backend->size) {
error_setg(errp, "can't create backend with size 0");
return;
return false;
}
fd = qemu_open_old("/dev/sgx_vepc", O_RDWR);
if (fd < 0) {
error_setg_errno(errp, errno,
"failed to open /dev/sgx_vepc to alloc SGX EPC");
return;
return false;
}
name = object_get_canonical_path(OBJECT(backend));
ram_flags = (backend->share ? RAM_SHARED : 0) | RAM_PROTECTED;
memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend),
name, backend->size, ram_flags,
fd, 0, errp);
g_free(name);
return memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name,
backend->size, ram_flags, fd, 0, errp);
}
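This and the following hostmem backends switch the class hook from void to bool, so the alloc signature in HostMemoryBackendClass presumably now reads (assumed declaration):

    struct HostMemoryBackendClass {
        ObjectClass parent_class;

        /* return true on success; set *errp and return false on failure */
        bool (*alloc)(HostMemoryBackend *backend, Error **errp);
    };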
static void sgx_epc_backend_instance_init(Object *obj)


@ -36,24 +36,25 @@ struct HostMemoryBackendFile {
OnOffAuto rom;
};
static void
static bool
file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
{
#ifndef CONFIG_POSIX
error_setg(errp, "backend '%s' not supported on this host",
object_get_typename(OBJECT(backend)));
return false;
#else
HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(backend);
g_autofree gchar *name = NULL;
uint32_t ram_flags;
gchar *name;
if (!backend->size) {
error_setg(errp, "can't create backend with size 0");
return;
return false;
}
if (!fb->mem_path) {
error_setg(errp, "mem-path property not set");
return;
return false;
}
switch (fb->rom) {
@ -65,18 +66,18 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
if (!fb->readonly) {
error_setg(errp, "property 'rom' = 'on' is not supported with"
" 'readonly' = 'off'");
return;
return false;
}
break;
case ON_OFF_AUTO_OFF:
if (fb->readonly && backend->share) {
error_setg(errp, "property 'rom' = 'off' is incompatible with"
" 'readonly' = 'on' and 'share' = 'on'");
return;
return false;
}
break;
default:
assert(false);
g_assert_not_reached();
}
name = host_memory_backend_get_name(backend);
@ -86,10 +87,9 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
ram_flags |= fb->is_pmem ? RAM_PMEM : 0;
ram_flags |= RAM_NAMED_FILE;
memory_region_init_ram_from_file(&backend->mr, OBJECT(backend), name,
backend->size, fb->align, ram_flags,
fb->mem_path, fb->offset, errp);
g_free(name);
return memory_region_init_ram_from_file(&backend->mr, OBJECT(backend), name,
backend->size, fb->align, ram_flags,
fb->mem_path, fb->offset, errp);
#endif
}


@ -31,17 +31,17 @@ struct HostMemoryBackendMemfd {
bool seal;
};
static void
static bool
memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
{
HostMemoryBackendMemfd *m = MEMORY_BACKEND_MEMFD(backend);
g_autofree char *name = NULL;
uint32_t ram_flags;
char *name;
int fd;
if (!backend->size) {
error_setg(errp, "can't create backend with size 0");
return;
return false;
}
fd = qemu_memfd_create(TYPE_MEMORY_BACKEND_MEMFD, backend->size,
@ -49,15 +49,14 @@ memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL : 0,
errp);
if (fd == -1) {
return;
return false;
}
name = host_memory_backend_get_name(backend);
ram_flags = backend->share ? RAM_SHARED : 0;
ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name,
backend->size, ram_flags, fd, 0, errp);
g_free(name);
return memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name,
backend->size, ram_flags, fd, 0, errp);
}
static bool


@ -16,23 +16,23 @@
#include "qemu/module.h"
#include "qom/object_interfaces.h"
static void
static bool
ram_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
{
g_autofree char *name = NULL;
uint32_t ram_flags;
char *name;
if (!backend->size) {
error_setg(errp, "can't create backend with size 0");
return;
return false;
}
name = host_memory_backend_get_name(backend);
ram_flags = backend->share ? RAM_SHARED : 0;
ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
memory_region_init_ram_flags_nomigrate(&backend->mr, OBJECT(backend), name,
backend->size, ram_flags, errp);
g_free(name);
return memory_region_init_ram_flags_nomigrate(&backend->mr, OBJECT(backend),
name, backend->size,
ram_flags, errp);
}
static void


@ -20,6 +20,7 @@
#include "qom/object_interfaces.h"
#include "qemu/mmap-alloc.h"
#include "qemu/madvise.h"
#include "hw/qdev-core.h"
#ifdef CONFIG_NUMA
#include <numaif.h>
@ -219,7 +220,6 @@ static bool host_memory_backend_get_prealloc(Object *obj, Error **errp)
static void host_memory_backend_set_prealloc(Object *obj, bool value,
Error **errp)
{
Error *local_err = NULL;
HostMemoryBackend *backend = MEMORY_BACKEND(obj);
if (!backend->reserve && value) {
@ -237,10 +237,8 @@ static void host_memory_backend_set_prealloc(Object *obj, bool value,
void *ptr = memory_region_get_ram_ptr(&backend->mr);
uint64_t sz = memory_region_size(&backend->mr);
qemu_prealloc_mem(fd, ptr, sz, backend->prealloc_threads,
backend->prealloc_context, &local_err);
if (local_err) {
error_propagate(errp, local_err);
if (!qemu_prealloc_mem(fd, ptr, sz, backend->prealloc_threads,
backend->prealloc_context, false, errp)) {
return;
}
backend->prealloc = true;
@ -324,91 +322,92 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
{
HostMemoryBackend *backend = MEMORY_BACKEND(uc);
HostMemoryBackendClass *bc = MEMORY_BACKEND_GET_CLASS(uc);
Error *local_err = NULL;
void *ptr;
uint64_t sz;
bool async = !phase_check(PHASE_LATE_BACKENDS_CREATED);
if (bc->alloc) {
bc->alloc(backend, &local_err);
if (local_err) {
goto out;
}
if (!bc->alloc) {
return;
}
if (!bc->alloc(backend, errp)) {
return;
}
ptr = memory_region_get_ram_ptr(&backend->mr);
sz = memory_region_size(&backend->mr);
ptr = memory_region_get_ram_ptr(&backend->mr);
sz = memory_region_size(&backend->mr);
if (backend->merge) {
qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE);
}
if (!backend->dump) {
qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP);
}
if (backend->merge) {
qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE);
}
if (!backend->dump) {
qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP);
}
#ifdef CONFIG_NUMA
unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES);
/* lastbit == MAX_NODES means maxnode = 0 */
unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1);
/* ensure policy won't be ignored in case memory is preallocated
* before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so
* this doesn't catch hugepage case. */
unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE;
int mode = backend->policy;
unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES);
/* lastbit == MAX_NODES means maxnode = 0 */
unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1);
/*
* Ensure policy won't be ignored in case memory is preallocated
* before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so
* this doesn't catch hugepage case.
*/
unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE;
int mode = backend->policy;
/* check for invalid host-nodes and policies and give more verbose
* error messages than mbind(). */
if (maxnode && backend->policy == MPOL_DEFAULT) {
error_setg(errp, "host-nodes must be empty for policy default,"
" or you should explicitly specify a policy other"
" than default");
return;
} else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) {
error_setg(errp, "host-nodes must be set for policy %s",
HostMemPolicy_str(backend->policy));
return;
}
/* check for invalid host-nodes and policies and give more verbose
* error messages than mbind(). */
if (maxnode && backend->policy == MPOL_DEFAULT) {
error_setg(errp, "host-nodes must be empty for policy default,"
" or you should explicitly specify a policy other"
" than default");
return;
} else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) {
error_setg(errp, "host-nodes must be set for policy %s",
HostMemPolicy_str(backend->policy));
return;
}
/* We can have up to MAX_NODES nodes, but we need to pass maxnode+1
* as argument to mbind() due to an old Linux bug (feature?) which
* cuts off the last specified node. This means backend->host_nodes
* must have MAX_NODES+1 bits available.
*/
assert(sizeof(backend->host_nodes) >=
BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long));
assert(maxnode <= MAX_NODES);
/*
* We can have up to MAX_NODES nodes, but we need to pass maxnode+1
* as argument to mbind() due to an old Linux bug (feature?) which
* cuts off the last specified node. This means backend->host_nodes
* must have MAX_NODES+1 bits available.
*/
assert(sizeof(backend->host_nodes) >=
BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long));
assert(maxnode <= MAX_NODES);
#ifdef HAVE_NUMA_HAS_PREFERRED_MANY
if (mode == MPOL_PREFERRED && numa_has_preferred_many() > 0) {
/*
* Replace with MPOL_PREFERRED_MANY otherwise the mbind() below
* silently picks the first node.
*/
mode = MPOL_PREFERRED_MANY;
}
if (mode == MPOL_PREFERRED && numa_has_preferred_many() > 0) {
/*
* Replace with MPOL_PREFERRED_MANY otherwise the mbind() below
* silently picks the first node.
*/
mode = MPOL_PREFERRED_MANY;
}
#endif
if (maxnode &&
mbind(ptr, sz, mode, backend->host_nodes, maxnode + 1, flags)) {
if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) {
error_setg_errno(errp, errno,
"cannot bind memory to host NUMA nodes");
return;
}
}
#endif
/*
* Preallocate memory after the NUMA policy has been instantiated.
* This is necessary to guarantee memory is allocated with
* specified NUMA policy in place.
*/
if (backend->prealloc && !qemu_prealloc_mem(memory_region_get_fd(&backend->mr),
ptr, sz,
backend->prealloc_threads,
backend->prealloc_context,
async, errp)) {
return;
}
}
static bool
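The maxnode + 1 workaround above is subtle enough to deserve a standalone illustration. A minimal sketch (not QEMU code; assumes libnuma's <numaif.h> is available, link with -lnuma) that binds an anonymous mapping to host node 0 and passes one more than the number of significant nodemask bits, so the kernel does not cut off the last specified node:

#include <numaif.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t sz = 2 * 1024 * 1024;
    void *ptr = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    unsigned long nodemask = 1UL << 0; /* host node 0, so lastbit == 0 */

    if (ptr == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    /* Only lastbit + 1 == 1 bit is meant, but pass 2 as maxnode (see above) */
    if (mbind(ptr, sz, MPOL_BIND, &nodemask, 2,
              MPOL_MF_STRICT | MPOL_MF_MOVE)) {
        perror("mbind");
        return 1;
    }
    return 0;
}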

backends/iommufd.c (new file, 234 lines)

@ -0,0 +1,234 @@
/*
* iommufd container backend
*
* Copyright (C) 2023 Intel Corporation.
* Copyright Red Hat, Inc. 2023
*
* Authors: Yi Liu <yi.l.liu@intel.com>
* Eric Auger <eric.auger@redhat.com>
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "sysemu/iommufd.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/module.h"
#include "qom/object_interfaces.h"
#include "qemu/error-report.h"
#include "monitor/monitor.h"
#include "trace.h"
#include <sys/ioctl.h>
#include <linux/iommufd.h>
static void iommufd_backend_init(Object *obj)
{
IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
be->fd = -1;
be->users = 0;
be->owned = true;
}
static void iommufd_backend_finalize(Object *obj)
{
IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
if (be->owned) {
close(be->fd);
be->fd = -1;
}
}
static void iommufd_backend_set_fd(Object *obj, const char *str, Error **errp)
{
ERRP_GUARD();
IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
int fd = -1;
fd = monitor_fd_param(monitor_cur(), str, errp);
if (fd == -1) {
error_prepend(errp, "Could not parse remote object fd %s:", str);
return;
}
be->fd = fd;
be->owned = false;
trace_iommu_backend_set_fd(be->fd);
}
static bool iommufd_backend_can_be_deleted(UserCreatable *uc)
{
IOMMUFDBackend *be = IOMMUFD_BACKEND(uc);
return !be->users;
}
static void iommufd_backend_class_init(ObjectClass *oc, void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
ucc->can_be_deleted = iommufd_backend_can_be_deleted;
object_class_property_add_str(oc, "fd", NULL, iommufd_backend_set_fd);
}
int iommufd_backend_connect(IOMMUFDBackend *be, Error **errp)
{
int fd, ret = 0;
if (be->owned && !be->users) {
fd = qemu_open_old("/dev/iommu", O_RDWR);
if (fd < 0) {
error_setg_errno(errp, errno, "/dev/iommu opening failed");
ret = fd;
goto out;
}
be->fd = fd;
}
be->users++;
out:
trace_iommufd_backend_connect(be->fd, be->owned,
be->users, ret);
return ret;
}
void iommufd_backend_disconnect(IOMMUFDBackend *be)
{
if (!be->users) {
goto out;
}
be->users--;
if (!be->users && be->owned) {
close(be->fd);
be->fd = -1;
}
out:
trace_iommufd_backend_disconnect(be->fd, be->users);
}
int iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id,
Error **errp)
{
int ret, fd = be->fd;
struct iommu_ioas_alloc alloc_data = {
.size = sizeof(alloc_data),
.flags = 0,
};
ret = ioctl(fd, IOMMU_IOAS_ALLOC, &alloc_data);
if (ret) {
error_setg_errno(errp, errno, "Failed to allocate ioas");
return ret;
}
*ioas_id = alloc_data.out_ioas_id;
trace_iommufd_backend_alloc_ioas(fd, *ioas_id, ret);
return ret;
}
void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id)
{
int ret, fd = be->fd;
struct iommu_destroy des = {
.size = sizeof(des),
.id = id,
};
ret = ioctl(fd, IOMMU_DESTROY, &des);
trace_iommufd_backend_free_id(fd, id, ret);
if (ret) {
error_report("Failed to free id: %u %m", id);
}
}
int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova,
ram_addr_t size, void *vaddr, bool readonly)
{
int ret, fd = be->fd;
struct iommu_ioas_map map = {
.size = sizeof(map),
.flags = IOMMU_IOAS_MAP_READABLE |
IOMMU_IOAS_MAP_FIXED_IOVA,
.ioas_id = ioas_id,
.__reserved = 0,
.user_va = (uintptr_t)vaddr,
.iova = iova,
.length = size,
};
if (!readonly) {
map.flags |= IOMMU_IOAS_MAP_WRITEABLE;
}
ret = ioctl(fd, IOMMU_IOAS_MAP, &map);
trace_iommufd_backend_map_dma(fd, ioas_id, iova, size,
vaddr, readonly, ret);
if (ret) {
ret = -errno;
/* TODO: Mapping hardware PCI BAR regions is not supported for now. */
if (errno == EFAULT) {
warn_report("IOMMU_IOAS_MAP failed: %m, PCI BAR?");
} else {
error_report("IOMMU_IOAS_MAP failed: %m");
}
}
return ret;
}
int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id,
hwaddr iova, ram_addr_t size)
{
int ret, fd = be->fd;
struct iommu_ioas_unmap unmap = {
.size = sizeof(unmap),
.ioas_id = ioas_id,
.iova = iova,
.length = size,
};
ret = ioctl(fd, IOMMU_IOAS_UNMAP, &unmap);
/*
 * IOMMUFD treats mappings as objects, so unmapping a nonexistent
 * mapping is treated as deleting a nonexistent object and returns
 * ENOENT. This differs from the legacy backend, which allows it.
 * A vIOMMU may trigger a lot of redundant unmaps, so to avoid
 * flooding the log, treat them as success for IOMMUFD just like
 * the legacy backend does.
 */
if (ret && errno == ENOENT) {
trace_iommufd_backend_unmap_dma_non_exist(fd, ioas_id, iova, size, ret);
ret = 0;
} else {
trace_iommufd_backend_unmap_dma(fd, ioas_id, iova, size, ret);
}
if (ret) {
ret = -errno;
error_report("IOMMU_IOAS_UNMAP failed: %m");
}
return ret;
}
static const TypeInfo iommufd_backend_info = {
.name = TYPE_IOMMUFD_BACKEND,
.parent = TYPE_OBJECT,
.instance_size = sizeof(IOMMUFDBackend),
.instance_init = iommufd_backend_init,
.instance_finalize = iommufd_backend_finalize,
.class_size = sizeof(IOMMUFDBackendClass),
.class_init = iommufd_backend_class_init,
.interfaces = (InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
};
static void register_types(void)
{
type_register_static(&iommufd_backend_info);
}
type_init(register_types);
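The backend above is a user-creatable QOM object, so it can be instantiated on the command line and handed to a VFIO device; a plausible invocation (the PCI address is a placeholder) is:

qemu-system-x86_64 \
    -object iommufd,id=iommufd0 \
    -device vfio-pci,host=0000:02:00.0,iommufd=iommufd0

Alternatively, a pre-opened /dev/iommu file descriptor can be supplied through the object's "fd" property, which is what iommufd_backend_set_fd() parses via monitor_fd_param().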

backends/meson.build

@ -10,9 +10,13 @@ system_ss.add([files(
'confidential-guest-support.c',
), numa])
system_ss.add(when: 'CONFIG_POSIX', if_true: files('rng-random.c'))
system_ss.add(when: 'CONFIG_POSIX', if_true: files('hostmem-file.c'))
system_ss.add(when: 'CONFIG_LINUX', if_true: files('hostmem-memfd.c'))
if host_os != 'windows'
system_ss.add(files('rng-random.c'))
system_ss.add(files('hostmem-file.c'))
endif
if host_os == 'linux'
system_ss.add(files('hostmem-memfd.c'))
endif
if keyutils.found()
system_ss.add(keyutils, files('cryptodev-lkcf.c'))
endif
@ -20,6 +24,7 @@ if have_vhost_user
system_ss.add(when: 'CONFIG_VIRTIO', if_true: files('vhost-user.c'))
endif
system_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost.c'))
system_ss.add(when: 'CONFIG_IOMMUFD', if_true: files('iommufd.c'))
if have_vhost_user_crypto
system_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost-user.c'))
endif

backends/tpm/tpm_emulator.c

@ -904,7 +904,7 @@ static void tpm_emulator_vm_state_change(void *opaque, bool running,
trace_tpm_emulator_vm_state_change(running, state);
if (!running || state != RUN_STATE_RUNNING || !tpm_emu->relock_storage) {
if (!running || !tpm_emu->relock_storage) {
return;
}
@ -939,7 +939,7 @@ static const VMStateDescription vmstate_tpm_emulator = {
.version_id = 0,
.pre_save = tpm_emulator_pre_save,
.post_load = tpm_emulator_post_load,
.fields = (VMStateField[]) {
.fields = (const VMStateField[]) {
VMSTATE_UINT32(state_blobs.permanent_flags, TPMEmulator),
VMSTATE_UINT32(state_blobs.permanent.size, TPMEmulator),
VMSTATE_VBUFFER_ALLOC_UINT32(state_blobs.permanent.buffer,

backends/trace-events

@ -5,3 +5,13 @@ dbus_vmstate_pre_save(void)
dbus_vmstate_post_load(int version_id) "version_id: %d"
dbus_vmstate_loading(const char *id) "id: %s"
dbus_vmstate_saving(const char *id) "id: %s"
# iommufd.c
iommufd_backend_connect(int fd, bool owned, uint32_t users, int ret) "fd=%d owned=%d users=%d (%d)"
iommufd_backend_disconnect(int fd, uint32_t users) "fd=%d users=%d"
iommu_backend_set_fd(int fd) "pre-opened /dev/iommu fd=%d"
iommufd_backend_map_dma(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, void *vaddr, bool readonly, int ret) " iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" addr=%p readonly=%d (%d)"
iommufd_backend_unmap_dma_non_exist(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, int ret) " Unmap nonexistent mapping: iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" (%d)"
iommufd_backend_unmap_dma(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, int ret) " iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" (%d)"
iommufd_backend_alloc_ioas(int iommufd, uint32_t ioas, int ret) " iommufd=%d ioas=%d (%d)"
iommufd_backend_free_id(int iommufd, uint32_t id, int ret) " iommufd=%d id=%d (%d)"

block.c (411 lines changed)

File diff suppressed because it is too large.

block/backup.c

@ -496,10 +496,10 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
block_copy_set_speed(bcs, speed);
/* Required permissions are taken by copy-before-write filter target */
bdrv_graph_wrlock(target);
bdrv_graph_wrlock();
block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
&error_abort);
bdrv_graph_wrunlock(target);
bdrv_graph_wrunlock();
return &job->common;

block/blkio.c

@ -89,6 +89,9 @@ static int blkio_resize_bounce_pool(BDRVBlkioState *s, int64_t bytes)
/* Pad size to reduce frequency of resize calls */
bytes += 128 * 1024;
/* Align the pool size to avoid blkio_alloc_mem_region() failure */
bytes = QEMU_ALIGN_UP(bytes, s->mem_region_alignment);
WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
int ret;
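QEMU_ALIGN_UP() rounds its first argument up to the next multiple of the second, so the padded pool size always satisfies mem_region_alignment. Illustrative values:

bytes = QEMU_ALIGN_UP(4097, 4096); /* -> 8192 */
bytes = QEMU_ALIGN_UP(8192, 4096); /* -> 8192, already aligned */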

block/blklogwrites.c

@ -3,7 +3,7 @@
*
* Copyright (c) 2017 Tuomas Tynkkynen <tuomas@tuxera.com>
* Copyright (c) 2018 Aapo Vienamo <aapo@tuxera.com>
* Copyright (c) 2018 Ari Sundholm <ari@tuxera.com>
* Copyright (c) 2018-2024 Ari Sundholm <ari@tuxera.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
@ -55,9 +55,34 @@ typedef struct {
BdrvChild *log_file;
uint32_t sectorsize;
uint32_t sectorbits;
uint64_t update_interval;
/*
* The mutable state of the driver, consisting of the current log sector
* and the number of log entries.
*
* May be read and/or written from multiple threads, and the mutex must be
* held when accessing these fields.
*/
uint64_t cur_log_sector;
uint64_t nr_entries;
uint64_t update_interval;
QemuMutex mutex;
/*
* The super block sequence number. Non-zero if a super block update is in
* progress.
*
* The mutex must be held when accessing this field.
*/
uint64_t super_update_seq;
/*
* A coroutine-aware queue to serialize super block updates.
*
* Used with the mutex to ensure that only one thread updates the super
* block at a time.
*/
CoQueue super_update_queue;
} BDRVBlkLogWritesState;
static QemuOptsList runtime_opts = {
@ -169,6 +194,9 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags,
goto fail;
}
qemu_mutex_init(&s->mutex);
qemu_co_queue_init(&s->super_update_queue);
log_append = qemu_opt_get_bool(opts, "log-append", false);
if (log_append) {
@ -231,6 +259,8 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags,
s->nr_entries = 0;
}
s->super_update_seq = 0;
if (!blk_log_writes_sector_size_valid(log_sector_size)) {
ret = -EINVAL;
error_setg(errp, "Invalid log sector size %"PRIu64, log_sector_size);
@ -251,10 +281,11 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags,
ret = 0;
fail_log:
if (ret < 0) {
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_unref_child(bs, s->log_file);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
s->log_file = NULL;
qemu_mutex_destroy(&s->mutex);
}
fail:
qemu_opts_del(opts);
@ -265,10 +296,11 @@ static void blk_log_writes_close(BlockDriverState *bs)
{
BDRVBlkLogWritesState *s = bs->opaque;
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_unref_child(bs, s->log_file);
s->log_file = NULL;
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
qemu_mutex_destroy(&s->mutex);
}
static int64_t coroutine_fn GRAPH_RDLOCK
@ -295,7 +327,7 @@ static void blk_log_writes_child_perm(BlockDriverState *bs, BdrvChild *c,
static void blk_log_writes_refresh_limits(BlockDriverState *bs, Error **errp)
{
BDRVBlkLogWritesState *s = bs->opaque;
const BDRVBlkLogWritesState *s = bs->opaque;
bs->bl.request_alignment = s->sectorsize;
}
@ -338,15 +370,18 @@ blk_log_writes_co_do_log(BlkLogWritesLogReq *lr)
* driver may be modified by other driver operations while waiting for the
* I/O to complete.
*/
qemu_mutex_lock(&s->mutex);
const uint64_t entry_start_sector = s->cur_log_sector;
const uint64_t entry_offset = entry_start_sector << s->sectorbits;
const uint64_t qiov_aligned_size = ROUND_UP(lr->qiov->size, s->sectorsize);
const uint64_t entry_aligned_size = qiov_aligned_size +
ROUND_UP(lr->zero_size, s->sectorsize);
const uint64_t entry_nr_sectors = entry_aligned_size >> s->sectorbits;
const uint64_t entry_seq = s->nr_entries + 1;
s->nr_entries++;
s->nr_entries = entry_seq;
s->cur_log_sector += entry_nr_sectors;
qemu_mutex_unlock(&s->mutex);
/*
* Write the log entry. Note that if this is a "write zeroes" operation,
@ -366,17 +401,44 @@ blk_log_writes_co_do_log(BlkLogWritesLogReq *lr)
/* Update super block on flush or every update interval */
if (lr->log_ret == 0 && ((lr->entry.flags & LOG_FLUSH_FLAG)
|| (s->nr_entries % s->update_interval == 0)))
|| (entry_seq % s->update_interval == 0)))
{
struct log_write_super super = {
.magic = cpu_to_le64(WRITE_LOG_MAGIC),
.version = cpu_to_le64(WRITE_LOG_VERSION),
.nr_entries = cpu_to_le64(s->nr_entries),
.nr_entries = 0, /* updated below */
.sectorsize = cpu_to_le32(s->sectorsize),
};
void *zeroes = g_malloc0(s->sectorsize - sizeof(super));
void *zeroes;
QEMUIOVector qiov;
/*
* Wait if a super block update is already in progress.
* Bail out if a newer update got its turn before us.
*/
WITH_QEMU_LOCK_GUARD(&s->mutex) {
CoQueueWaitFlags wait_flags = 0;
while (s->super_update_seq) {
if (entry_seq < s->super_update_seq) {
return;
}
qemu_co_queue_wait_flags(&s->super_update_queue,
&s->mutex, wait_flags);
/*
* In case the wait condition remains true after wakeup,
* to avoid starvation, make sure that this request is
* scheduled to rerun next by pushing it to the front of the
* queue.
*/
wait_flags = CO_QUEUE_WAIT_FRONT;
}
s->super_update_seq = entry_seq;
super.nr_entries = cpu_to_le64(s->nr_entries);
}
zeroes = g_malloc0(s->sectorsize - sizeof(super));
qemu_iovec_init(&qiov, 2);
qemu_iovec_add(&qiov, &super, sizeof(super));
qemu_iovec_add(&qiov, zeroes, s->sectorsize - sizeof(super));
@ -386,6 +448,13 @@ blk_log_writes_co_do_log(BlkLogWritesLogReq *lr)
if (lr->log_ret == 0) {
lr->log_ret = bdrv_co_flush(s->log_file->bs);
}
/* The super block has been updated. Let another request have a go. */
qemu_mutex_lock(&s->mutex);
s->super_update_seq = 0;
(void) qemu_co_queue_next(&s->super_update_queue);
qemu_mutex_unlock(&s->mutex);
qemu_iovec_destroy(&qiov);
g_free(zeroes);
}
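Taken together, the new fields and the wait loop above form a small claim/release protocol around the super block write. A condensed sketch of both halves (state and variable names mirror the hunks above; this is an illustration, not a drop-in excerpt):

/* Claim, under s->mutex: */
while (s->super_update_seq) {
    if (entry_seq < s->super_update_seq) {
        return;                       /* a newer update superseded this one */
    }
    qemu_co_queue_wait_flags(&s->super_update_queue, &s->mutex, wait_flags);
    wait_flags = CO_QUEUE_WAIT_FRONT; /* requeue at the front to avoid starvation */
}
s->super_update_seq = entry_seq;      /* this coroutine owns the update */

/* Release, after the write and flush complete: */
qemu_mutex_lock(&s->mutex);
s->super_update_seq = 0;
qemu_co_queue_next(&s->super_update_queue); /* wake the next waiter */
qemu_mutex_unlock(&s->mutex);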
@ -405,7 +474,7 @@ blk_log_writes_co_log(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
{
QEMUIOVector log_qiov;
size_t niov = qiov ? qiov->niov : 0;
BDRVBlkLogWritesState *s = bs->opaque;
const BDRVBlkLogWritesState *s = bs->opaque;
BlkLogWritesFileReq fr = {
.bs = bs,
.offset = offset,

block/blkverify.c

@ -151,10 +151,10 @@ static void blkverify_close(BlockDriverState *bs)
{
BDRVBlkverifyState *s = bs->opaque;
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_unref_child(bs, s->test_file);
s->test_file = NULL;
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
}
static int64_t coroutine_fn GRAPH_RDLOCK

block/block-backend.c

@ -53,7 +53,7 @@ struct BlockBackend {
//// --- End LibAFL code ---
int refcnt;
BdrvChild *root;
AioContext *ctx;
AioContext *ctx; /* access with atomic operations only */
DriveInfo *legacy_dinfo; /* null unless created by drive_new() */
QTAILQ_ENTRY(BlockBackend) link; /* for block_backends */
QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
@ -399,8 +399,6 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
* Both sets of permissions can be changed later using blk_set_perm().
*
* Return the new BlockBackend on success, null on failure.
*
* Callers must hold the AioContext lock of @bs.
*/
BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm,
uint64_t shared_perm, Error **errp)
@ -425,8 +423,6 @@ BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm,
* Just as with bdrv_open(), after having called this function the reference to
* @options belongs to the block layer (even on failure).
*
* Called without holding an AioContext lock.
*
* TODO: Remove @filename and @flags; it should be possible to specify a whole
* BDS tree just by specifying the @options QDict (or @reference,
* alternatively). At the time of adding this function, this is not possible,
@ -438,7 +434,6 @@ BlockBackend *blk_new_open(const char *filename, const char *reference,
{
BlockBackend *blk;
BlockDriverState *bs;
AioContext *ctx;
uint64_t perm = 0;
uint64_t shared = BLK_PERM_ALL;
@ -468,23 +463,18 @@ BlockBackend *blk_new_open(const char *filename, const char *reference,
shared = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED;
}
aio_context_acquire(qemu_get_aio_context());
bs = bdrv_open(filename, reference, options, flags, errp);
aio_context_release(qemu_get_aio_context());
if (!bs) {
return NULL;
}
/* bdrv_open() could have moved bs to a different AioContext */
ctx = bdrv_get_aio_context(bs);
blk = blk_new(bdrv_get_aio_context(bs), perm, shared);
blk->perm = perm;
blk->shared_perm = shared;
aio_context_acquire(ctx);
blk_insert_bs(blk, bs, errp);
bdrv_unref(bs);
aio_context_release(ctx);
if (!blk->root) {
blk_unref(blk);
@ -586,13 +576,9 @@ void blk_remove_all_bs(void)
GLOBAL_STATE_CODE();
while ((blk = blk_all_next(blk)) != NULL) {
AioContext *ctx = blk_get_aio_context(blk);
aio_context_acquire(ctx);
if (blk->root) {
blk_remove_bs(blk);
}
aio_context_release(ctx);
}
}
@ -622,14 +608,14 @@ BlockDriverState *bdrv_next(BdrvNextIterator *it)
/* Must be called from the main loop */
assert(qemu_get_current_aio_context() == qemu_get_aio_context());
old_bs = it->bs;
/* First, return all root nodes of BlockBackends. In order to avoid
* returning a BDS twice when multiple BBs refer to it, we only return it
* if the BB is the first one in the parent list of the BDS. */
if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
BlockBackend *old_blk = it->blk;
old_bs = old_blk ? blk_bs(old_blk) : NULL;
do {
it->blk = blk_all_next(it->blk);
bs = it->blk ? blk_bs(it->blk) : NULL;
@ -643,11 +629,10 @@ BlockDriverState *bdrv_next(BdrvNextIterator *it)
if (bs) {
bdrv_ref(bs);
bdrv_unref(old_bs);
it->bs = bs;
return bs;
}
it->phase = BDRV_NEXT_MONITOR_OWNED;
} else {
old_bs = it->bs;
}
/* Then return the monitor-owned BDSes without a BB attached. Ignore all
@ -687,13 +672,10 @@ void bdrv_next_cleanup(BdrvNextIterator *it)
/* Must be called from the main loop */
assert(qemu_get_current_aio_context() == qemu_get_aio_context());
if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
if (it->blk) {
bdrv_unref(blk_bs(it->blk));
blk_unref(it->blk);
}
} else {
bdrv_unref(it->bs);
bdrv_unref(it->bs);
if (it->phase == BDRV_NEXT_BACKEND_ROOTS && it->blk) {
blk_unref(it->blk);
}
bdrv_next_reset(it);
@ -719,16 +701,16 @@ bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
error_setg(errp, "Invalid device name");
return false;
}
//// --- Begin LibAFL code ---
if (blk_by_name(name)) {
error_setg(errp, "Device with id '%s' already exists", name);
return false;
}
//// --- End LibAFL code ---
//// --- Begin LibAFL code ---
if (blk_by_name_hash(g_str_hash(name))) {
error_setg(errp, "Device with name hash '%x' already exists", g_str_hash(name));
return false;
}
//// --- End LibAFL code ---
if (bdrv_find_node(name)) {
error_setg(errp,
"Device name '%s' conflicts with an existing node name",
@ -924,14 +906,11 @@ BlockBackend *blk_by_public(BlockBackendPublic *public)
/*
* Disassociates the currently associated BlockDriverState from @blk.
*
* The caller must hold the AioContext lock for the BlockBackend.
*/
void blk_remove_bs(BlockBackend *blk)
{
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
BdrvChild *root;
AioContext *ctx;
GLOBAL_STATE_CODE();
@ -961,30 +940,26 @@ void blk_remove_bs(BlockBackend *blk)
root = blk->root;
blk->root = NULL;
ctx = bdrv_get_aio_context(root->bs);
bdrv_graph_wrlock(root->bs);
bdrv_graph_wrlock();
bdrv_root_unref_child(root);
bdrv_graph_wrunlock_ctx(ctx);
bdrv_graph_wrunlock();
}
/*
* Associates a new BlockDriverState with @blk.
*
* Callers must hold the AioContext lock of @bs.
*/
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
{
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
AioContext *ctx = bdrv_get_aio_context(bs);
GLOBAL_STATE_CODE();
bdrv_ref(bs);
bdrv_graph_wrlock(bs);
bdrv_graph_wrlock();
blk->root = bdrv_root_attach_child(bs, "root", &child_root,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
blk->perm, blk->shared_perm,
blk, errp);
bdrv_graph_wrunlock_ctx(ctx);
bdrv_graph_wrunlock();
if (blk->root == NULL) {
return -EPERM;
}
@ -2504,22 +2479,22 @@ void blk_op_unblock_all(BlockBackend *blk, Error *reason)
}
}
/**
* Return BB's current AioContext. Note that this context may change
* concurrently at any time, with one exception: If the BB has a root node
* attached, its context will only change through bdrv_try_change_aio_context(),
* which creates a drained section. Therefore, incrementing such a BB's
* in-flight counter will prevent its context from changing.
*/
AioContext *blk_get_aio_context(BlockBackend *blk)
{
BlockDriverState *bs;
IO_CODE();
if (!blk) {
return qemu_get_aio_context();
}
bs = blk_bs(blk);
if (bs) {
AioContext *ctx = bdrv_get_aio_context(blk_bs(blk));
assert(ctx == blk->ctx);
}
return blk->ctx;
return qatomic_read(&blk->ctx);
}
int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
@ -2532,7 +2507,7 @@ int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
GLOBAL_STATE_CODE();
if (!bs) {
blk->ctx = new_context;
qatomic_set(&blk->ctx, new_context);
return 0;
}
@ -2561,7 +2536,7 @@ static void blk_root_set_aio_ctx_commit(void *opaque)
AioContext *new_context = s->new_ctx;
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
blk->ctx = new_context;
qatomic_set(&blk->ctx, new_context);
if (tgm->throttle_state) {
throttle_group_detach_aio_context(tgm);
throttle_group_attach_aio_context(tgm, new_context);
@ -2808,20 +2783,16 @@ int blk_commit_all(void)
GRAPH_RDLOCK_GUARD_MAINLOOP();
while ((blk = blk_all_next(blk)) != NULL) {
AioContext *aio_context = blk_get_aio_context(blk);
BlockDriverState *unfiltered_bs = bdrv_skip_filters(blk_bs(blk));
aio_context_acquire(aio_context);
if (blk_is_inserted(blk) && bdrv_cow_child(unfiltered_bs)) {
int ret;
ret = bdrv_commit(unfiltered_bs);
if (ret < 0) {
aio_context_release(aio_context);
return ret;
}
}
aio_context_release(aio_context);
}
return 0;
}

block/commit.c

@ -42,6 +42,7 @@ typedef struct CommitBlockJob {
bool base_read_only;
bool chain_frozen;
char *backing_file_str;
bool backing_mask_protocol;
} CommitBlockJob;
static int commit_prepare(Job *job)
@ -61,7 +62,8 @@ static int commit_prepare(Job *job)
/* FIXME: bdrv_drop_intermediate treats total failures and partial failures
* identically. Further work is needed to disambiguate these cases. */
return bdrv_drop_intermediate(s->commit_top_bs, s->base_bs,
s->backing_file_str);
s->backing_file_str,
s->backing_mask_protocol);
}
static void commit_abort(Job *job)
@ -100,9 +102,9 @@ static void commit_abort(Job *job)
bdrv_graph_rdunlock_main_loop();
bdrv_drained_begin(commit_top_backing_bs);
bdrv_graph_wrlock(commit_top_backing_bs);
bdrv_graph_wrlock();
bdrv_replace_node(s->commit_top_bs, commit_top_backing_bs, &error_abort);
bdrv_graph_wrunlock(commit_top_backing_bs);
bdrv_graph_wrunlock();
bdrv_drained_end(commit_top_backing_bs);
bdrv_unref(s->commit_top_bs);
@ -254,6 +256,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
BlockDriverState *base, BlockDriverState *top,
int creation_flags, int64_t speed,
BlockdevOnError on_error, const char *backing_file_str,
bool backing_mask_protocol,
const char *filter_node_name, Error **errp)
{
CommitBlockJob *s;
@ -339,7 +342,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
* this is the responsibility of the interface (i.e. whoever calls
* commit_start()).
*/
bdrv_graph_wrlock(top);
bdrv_graph_wrlock();
s->base_overlay = bdrv_find_overlay(top, base);
assert(s->base_overlay);
@ -370,19 +373,19 @@ void commit_start(const char *job_id, BlockDriverState *bs,
ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
iter_shared_perms, errp);
if (ret < 0) {
bdrv_graph_wrunlock(top);
bdrv_graph_wrunlock();
goto fail;
}
}
if (bdrv_freeze_backing_chain(commit_top_bs, base, errp) < 0) {
bdrv_graph_wrunlock(top);
bdrv_graph_wrunlock();
goto fail;
}
s->chain_frozen = true;
ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp);
bdrv_graph_wrunlock(top);
bdrv_graph_wrunlock();
if (ret < 0) {
goto fail;
@ -408,6 +411,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
blk_set_disable_request_queuing(s->top, true);
s->backing_file_str = g_strdup(backing_file_str);
s->backing_mask_protocol = backing_mask_protocol;
s->on_error = on_error;
trace_commit_start(bs, base, top, s);
@ -434,9 +438,9 @@ fail:
* otherwise this would fail because of lack of permissions. */
if (commit_top_bs) {
bdrv_drained_begin(top);
bdrv_graph_wrlock(top);
bdrv_graph_wrlock();
bdrv_replace_node(commit_top_bs, top, &error_abort);
bdrv_graph_wrunlock(top);
bdrv_graph_wrunlock();
bdrv_drained_end(top);
}
}

block/copy-before-write.c

@ -407,12 +407,12 @@ out:
static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
ERRP_GUARD();
BDRVCopyBeforeWriteState *s = bs->opaque;
BdrvDirtyBitmap *bitmap = NULL;
int64_t cluster_size;
g_autoptr(BlockdevOptions) full_opts = NULL;
BlockdevOptionsCbw *opts;
AioContext *ctx;
int ret;
full_opts = cbw_parse_options(options, errp);
@ -435,15 +435,11 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
GRAPH_RDLOCK_GUARD_MAINLOOP();
ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
if (opts->bitmap) {
bitmap = block_dirty_bitmap_lookup(opts->bitmap->node,
opts->bitmap->name, NULL, errp);
if (!bitmap) {
ret = -EINVAL;
goto out;
return -EINVAL;
}
}
s->on_cbw_error = opts->has_on_cbw_error ? opts->on_cbw_error :
@ -461,24 +457,21 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
s->bcs = block_copy_state_new(bs->file, s->target, bitmap, errp);
if (!s->bcs) {
error_prepend(errp, "Cannot create block-copy-state: ");
ret = -EINVAL;
goto out;
return -EINVAL;
}
cluster_size = block_copy_cluster_size(s->bcs);
s->done_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
if (!s->done_bitmap) {
ret = -EINVAL;
goto out;
return -EINVAL;
}
bdrv_disable_dirty_bitmap(s->done_bitmap);
/* s->access_bitmap starts equal to bcs bitmap */
s->access_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
if (!s->access_bitmap) {
ret = -EINVAL;
goto out;
return -EINVAL;
}
bdrv_disable_dirty_bitmap(s->access_bitmap);
bdrv_dirty_bitmap_merge_internal(s->access_bitmap,
@ -487,11 +480,7 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
qemu_co_mutex_init(&s->lock);
QLIST_INIT(&s->frozen_read_reqs);
ret = 0;
out:
aio_context_release(ctx);
return ret;
return 0;
}
static void cbw_close(BlockDriverState *bs)

block/crypto.c

@ -39,6 +39,7 @@ typedef struct BlockCrypto BlockCrypto;
struct BlockCrypto {
QCryptoBlock *block;
bool updating_keys;
BdrvChild *header; /* Reference to the detached LUKS header */
};
@ -63,12 +64,14 @@ static int block_crypto_read_func(QCryptoBlock *block,
Error **errp)
{
BlockDriverState *bs = opaque;
BlockCrypto *crypto = bs->opaque;
ssize_t ret;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
ret = bdrv_pread(bs->file, offset, buflen, buf, 0);
ret = bdrv_pread(crypto->header ? crypto->header : bs->file,
offset, buflen, buf, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read encryption header");
return ret;
@ -84,12 +87,14 @@ static int block_crypto_write_func(QCryptoBlock *block,
Error **errp)
{
BlockDriverState *bs = opaque;
BlockCrypto *crypto = bs->opaque;
ssize_t ret;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
ret = bdrv_pwrite(bs->file, offset, buflen, buf, 0);
ret = bdrv_pwrite(crypto->header ? crypto->header : bs->file,
offset, buflen, buf, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not write encryption header");
return ret;
@ -157,6 +162,48 @@ error:
return ret;
}
static int coroutine_fn GRAPH_UNLOCKED
block_crypto_co_format_luks_payload(BlockdevCreateOptionsLUKS *luks_opts,
Error **errp)
{
BlockDriverState *bs = NULL;
BlockBackend *blk = NULL;
Error *local_error = NULL;
int ret;
if (luks_opts->size > INT64_MAX) {
return -EFBIG;
}
bs = bdrv_co_open_blockdev_ref(luks_opts->file, errp);
if (bs == NULL) {
return -EIO;
}
blk = blk_co_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE,
BLK_PERM_ALL, errp);
if (!blk) {
ret = -EPERM;
goto fail;
}
ret = blk_truncate(blk, luks_opts->size, true,
luks_opts->preallocation, 0, &local_error);
if (ret < 0) {
if (ret == -EFBIG) {
/* Replace the error message with a better one */
error_free(local_error);
error_setg(errp, "The requested file size is too large");
}
goto fail;
}
ret = 0;
fail:
bdrv_co_unref(bs);
return ret;
}
static QemuOptsList block_crypto_runtime_opts_luks = {
.name = "crypto",
@ -184,6 +231,7 @@ static QemuOptsList block_crypto_create_opts_luks = {
BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG(""),
BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG(""),
BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME(""),
BLOCK_CRYPTO_OPT_DEF_LUKS_DETACHED_HEADER(""),
{ /* end of list */ }
},
};
@ -262,6 +310,8 @@ static int block_crypto_open_generic(QCryptoBlockFormat format,
int flags,
Error **errp)
{
ERRP_GUARD();
BlockCrypto *crypto = bs->opaque;
QemuOpts *opts = NULL;
int ret;
@ -276,6 +326,13 @@ static int block_crypto_open_generic(QCryptoBlockFormat format,
return ret;
}
crypto->header = bdrv_open_child(NULL, options, "header", bs,
&child_of_bds, BDRV_CHILD_METADATA,
true, errp);
if (*errp != NULL) {
return -EINVAL;
}
GRAPH_RDLOCK_GUARD_MAINLOOP();
bs->supported_write_flags = BDRV_REQ_FUA &
@ -299,6 +356,9 @@ static int block_crypto_open_generic(QCryptoBlockFormat format,
if (flags & BDRV_O_NO_IO) {
cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
}
if (crypto->header != NULL) {
cflags |= QCRYPTO_BLOCK_OPEN_DETACHED;
}
crypto->block = qcrypto_block_open(open_opts, NULL,
block_crypto_read_func,
bs,
@ -324,7 +384,9 @@ static int block_crypto_open_generic(QCryptoBlockFormat format,
static int coroutine_fn GRAPH_UNLOCKED
block_crypto_co_create_generic(BlockDriverState *bs, int64_t size,
QCryptoBlockCreateOptions *opts,
PreallocMode prealloc, Error **errp)
PreallocMode prealloc,
unsigned int flags,
Error **errp)
{
int ret;
BlockBackend *blk;
@ -344,7 +406,7 @@ block_crypto_co_create_generic(BlockDriverState *bs, int64_t size,
data = (struct BlockCryptoCreateData) {
.blk = blk,
.size = size,
.size = flags & QCRYPTO_BLOCK_CREATE_DETACHED ? 0 : size,
.prealloc = prealloc,
};
@ -352,6 +414,7 @@ block_crypto_co_create_generic(BlockDriverState *bs, int64_t size,
block_crypto_create_init_func,
block_crypto_create_write_func,
&data,
flags,
errp);
if (!crypto) {
@ -638,17 +701,27 @@ static int coroutine_fn GRAPH_UNLOCKED
block_crypto_co_create_luks(BlockdevCreateOptions *create_options, Error **errp)
{
BlockdevCreateOptionsLUKS *luks_opts;
BlockDriverState *hdr_bs = NULL;
BlockDriverState *bs = NULL;
QCryptoBlockCreateOptions create_opts;
PreallocMode preallocation = PREALLOC_MODE_OFF;
unsigned int cflags = 0;
int ret;
assert(create_options->driver == BLOCKDEV_DRIVER_LUKS);
luks_opts = &create_options->u.luks;
bs = bdrv_co_open_blockdev_ref(luks_opts->file, errp);
if (bs == NULL) {
return -EIO;
if (luks_opts->header == NULL && luks_opts->file == NULL) {
error_setg(errp, "Either the parameter 'header' or 'file' must "
"be specified");
return -EINVAL;
}
if ((luks_opts->preallocation != PREALLOC_MODE_OFF) &&
(luks_opts->file == NULL)) {
error_setg(errp, "Parameter 'preallocation' requires 'file' to be "
"specified for formatting LUKS disk");
return -EINVAL;
}
create_opts = (QCryptoBlockCreateOptions) {
@ -660,15 +733,52 @@ block_crypto_co_create_luks(BlockdevCreateOptions *create_options, Error **errp)
preallocation = luks_opts->preallocation;
}
ret = block_crypto_co_create_generic(bs, luks_opts->size, &create_opts,
preallocation, errp);
if (ret < 0) {
goto fail;
if (luks_opts->header) {
/* LUKS volume with detached header */
hdr_bs = bdrv_co_open_blockdev_ref(luks_opts->header, errp);
if (hdr_bs == NULL) {
return -EIO;
}
cflags |= QCRYPTO_BLOCK_CREATE_DETACHED;
/* Format the LUKS header node */
ret = block_crypto_co_create_generic(hdr_bs, 0, &create_opts,
PREALLOC_MODE_OFF, cflags, errp);
if (ret < 0) {
goto fail;
}
/* Format the LUKS payload node */
if (luks_opts->file) {
ret = block_crypto_co_format_luks_payload(luks_opts, errp);
if (ret < 0) {
goto fail;
}
}
} else if (luks_opts->file) {
/* LUKS volume with non-detached header */
bs = bdrv_co_open_blockdev_ref(luks_opts->file, errp);
if (bs == NULL) {
return -EIO;
}
ret = block_crypto_co_create_generic(bs, luks_opts->size, &create_opts,
preallocation, cflags, errp);
if (ret < 0) {
goto fail;
}
}
ret = 0;
fail:
bdrv_co_unref(bs);
if (hdr_bs != NULL) {
bdrv_co_unref(hdr_bs);
}
if (bs != NULL) {
bdrv_co_unref(bs);
}
return ret;
}
@ -682,6 +792,9 @@ block_crypto_co_create_opts_luks(BlockDriver *drv, const char *filename,
PreallocMode prealloc;
char *buf = NULL;
int64_t size;
bool detached_hdr =
qemu_opt_get_bool(opts, "detached-header", false);
unsigned int cflags = 0;
int ret;
Error *local_err = NULL;
@ -721,8 +834,13 @@ block_crypto_co_create_opts_luks(BlockDriver *drv, const char *filename,
goto fail;
}
if (detached_hdr) {
cflags |= QCRYPTO_BLOCK_CREATE_DETACHED;
}
/* Create format layer */
ret = block_crypto_co_create_generic(bs, size, create_opts, prealloc, errp);
ret = block_crypto_co_create_generic(bs, size, create_opts,
prealloc, cflags, errp);
if (ret < 0) {
goto fail;
}
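At runtime the detached header is wired up as a separate child node; a sketch of the open path (node names and filenames are made up for illustration) using the new "header" child parsed in block_crypto_open_generic():

qemu-system-x86_64 \
    -object secret,id=sec0,data=mypassword \
    -blockdev file,filename=payload.img,node-name=payload \
    -blockdev file,filename=header.img,node-name=hdr \
    -blockdev luks,file=payload,header=hdr,key-secret=sec0,node-name=luks0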

block/crypto.h

@ -41,6 +41,7 @@
#define BLOCK_CRYPTO_OPT_LUKS_IVGEN_HASH_ALG "ivgen-hash-alg"
#define BLOCK_CRYPTO_OPT_LUKS_HASH_ALG "hash-alg"
#define BLOCK_CRYPTO_OPT_LUKS_ITER_TIME "iter-time"
#define BLOCK_CRYPTO_OPT_LUKS_DETACHED_HEADER "detached-header"
#define BLOCK_CRYPTO_OPT_LUKS_KEYSLOT "keyslot"
#define BLOCK_CRYPTO_OPT_LUKS_STATE "state"
#define BLOCK_CRYPTO_OPT_LUKS_OLD_SECRET "old-secret"
@ -100,6 +101,13 @@
.help = "Select new state of affected keyslots (active/inactive)",\
}
#define BLOCK_CRYPTO_OPT_DEF_LUKS_DETACHED_HEADER(prefix) \
{ \
.name = prefix BLOCK_CRYPTO_OPT_LUKS_DETACHED_HEADER, \
.type = QEMU_OPT_BOOL, \
.help = "Create a detached LUKS header", \
}
#define BLOCK_CRYPTO_OPT_DEF_LUKS_KEYSLOT(prefix) \
{ \
.name = prefix BLOCK_CRYPTO_OPT_LUKS_KEYSLOT, \

block/export/export.c

@ -114,7 +114,6 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
}
ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
if (export->iothread) {
IOThread *iothread;
@ -133,8 +132,6 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
set_context_errp = fixed_iothread ? errp : NULL;
ret = bdrv_try_change_aio_context(bs, new_ctx, NULL, set_context_errp);
if (ret == 0) {
aio_context_release(ctx);
aio_context_acquire(new_ctx);
ctx = new_ctx;
} else if (fixed_iothread) {
goto fail;
@ -191,8 +188,6 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
assert(exp->blk != NULL);
QLIST_INSERT_HEAD(&block_exports, exp, next);
aio_context_release(ctx);
return exp;
fail:
@ -200,7 +195,6 @@ fail:
blk_set_dev_ops(blk, NULL, NULL);
blk_unref(blk);
}
aio_context_release(ctx);
if (exp) {
g_free(exp->id);
g_free(exp);
@ -218,9 +212,6 @@ void blk_exp_ref(BlockExport *exp)
static void blk_exp_delete_bh(void *opaque)
{
BlockExport *exp = opaque;
AioContext *aio_context = exp->ctx;
aio_context_acquire(aio_context);
assert(exp->refcount == 0);
QLIST_REMOVE(exp, next);
@ -230,8 +221,6 @@ static void blk_exp_delete_bh(void *opaque)
qapi_event_send_block_export_deleted(exp->id);
g_free(exp->id);
g_free(exp);
aio_context_release(aio_context);
}
void blk_exp_unref(BlockExport *exp)
@ -249,22 +238,16 @@ void blk_exp_unref(BlockExport *exp)
* connections and other internally held references start to shut down. When
* the function returns, there may still be active references while the export
* is in the process of shutting down.
*
* Acquires exp->ctx internally. Callers must *not* hold the lock.
*/
void blk_exp_request_shutdown(BlockExport *exp)
{
AioContext *aio_context = exp->ctx;
aio_context_acquire(aio_context);
/*
* If the user doesn't own the export any more, it is already shutting
* down. We must not call .request_shutdown and decrease the refcount a
* second time.
*/
if (!exp->user_owned) {
goto out;
return;
}
exp->drv->request_shutdown(exp);
@ -272,9 +255,6 @@ void blk_exp_request_shutdown(BlockExport *exp)
assert(exp->user_owned);
exp->user_owned = false;
blk_exp_unref(exp);
out:
aio_context_release(aio_context);
}
/*

block/export/vhost-user-blk-server.c

@ -278,7 +278,6 @@ static void vu_blk_exp_resize(void *opaque)
vu_config_change_msg(&vexp->vu_server.vu_dev);
}
/* Called with vexp->export.ctx acquired */
static void vu_blk_drained_begin(void *opaque)
{
VuBlkExport *vexp = opaque;
@ -287,7 +286,6 @@ static void vu_blk_drained_begin(void *opaque)
vhost_user_server_detach_aio_context(&vexp->vu_server);
}
/* Called with vexp->export.blk AioContext acquired */
static void vu_blk_drained_end(void *opaque)
{
VuBlkExport *vexp = opaque;
@ -300,8 +298,6 @@ static void vu_blk_drained_end(void *opaque)
* Ensures that bdrv_drained_begin() waits until in-flight requests complete
* and the server->co_trip coroutine has terminated. It will be restarted in
* vhost_user_server_attach_aio_context().
*
* Called with vexp->export.ctx acquired.
*/
static bool vu_blk_drained_poll(void *opaque)
{

block/file-posix.c

@ -712,17 +712,11 @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
#ifdef CONFIG_LINUX_AIO
/* Currently Linux does AIO only for files opened with O_DIRECT */
if (s->use_linux_aio) {
if (!(s->open_flags & O_DIRECT)) {
error_setg(errp, "aio=native was specified, but it requires "
"cache.direct=on, which was not specified.");
ret = -EINVAL;
goto fail;
}
if (!aio_setup_linux_aio(bdrv_get_aio_context(bs), errp)) {
error_prepend(errp, "Unable to use native AIO: ");
goto fail;
}
if (s->use_linux_aio && !(s->open_flags & O_DIRECT)) {
error_setg(errp, "aio=native was specified, but it requires "
"cache.direct=on, which was not specified.");
ret = -EINVAL;
goto fail;
}
#else
if (s->use_linux_aio) {
@ -733,14 +727,7 @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
}
#endif /* !defined(CONFIG_LINUX_AIO) */
#ifdef CONFIG_LINUX_IO_URING
if (s->use_linux_io_uring) {
if (!aio_setup_linux_io_uring(bdrv_get_aio_context(bs), errp)) {
error_prepend(errp, "Unable to use io_uring: ");
goto fail;
}
}
#else
#ifndef CONFIG_LINUX_IO_URING
if (s->use_linux_io_uring) {
error_setg(errp, "aio=io_uring was specified, but is not supported "
"in this build.");
@ -2444,6 +2431,48 @@ static bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
return true;
}
#ifdef CONFIG_LINUX_IO_URING
static inline bool raw_check_linux_io_uring(BDRVRawState *s)
{
Error *local_err = NULL;
AioContext *ctx;
if (!s->use_linux_io_uring) {
return false;
}
ctx = qemu_get_current_aio_context();
if (unlikely(!aio_setup_linux_io_uring(ctx, &local_err))) {
error_reportf_err(local_err, "Unable to use linux io_uring, "
"falling back to thread pool: ");
s->use_linux_io_uring = false;
return false;
}
return true;
}
#endif
#ifdef CONFIG_LINUX_AIO
static inline bool raw_check_linux_aio(BDRVRawState *s)
{
Error *local_err = NULL;
AioContext *ctx;
if (!s->use_linux_aio) {
return false;
}
ctx = qemu_get_current_aio_context();
if (unlikely(!aio_setup_linux_aio(ctx, &local_err))) {
error_reportf_err(local_err, "Unable to use Linux AIO, "
"falling back to thread pool: ");
s->use_linux_aio = false;
return false;
}
return true;
}
#endif
static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
uint64_t bytes, QEMUIOVector *qiov, int type)
{
@ -2474,13 +2503,13 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
if (s->needs_alignment && !bdrv_qiov_is_aligned(bs, qiov)) {
type |= QEMU_AIO_MISALIGNED;
#ifdef CONFIG_LINUX_IO_URING
} else if (s->use_linux_io_uring) {
} else if (raw_check_linux_io_uring(s)) {
assert(qiov->size == bytes);
ret = luring_co_submit(bs, s->fd, offset, qiov, type);
goto out;
#endif
#ifdef CONFIG_LINUX_AIO
} else if (s->use_linux_aio) {
} else if (raw_check_linux_aio(s)) {
assert(qiov->size == bytes);
ret = laio_co_submit(s->fd, offset, qiov, type,
s->aio_max_batch);
@ -2567,39 +2596,13 @@ static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
};
#ifdef CONFIG_LINUX_IO_URING
if (s->use_linux_io_uring) {
if (raw_check_linux_io_uring(s)) {
return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH);
}
#endif
return raw_thread_pool_submit(handle_aiocb_flush, &acb);
}
static void raw_aio_attach_aio_context(BlockDriverState *bs,
AioContext *new_context)
{
BDRVRawState __attribute__((unused)) *s = bs->opaque;
#ifdef CONFIG_LINUX_AIO
if (s->use_linux_aio) {
Error *local_err = NULL;
if (!aio_setup_linux_aio(new_context, &local_err)) {
error_reportf_err(local_err, "Unable to use native AIO, "
"falling back to thread pool: ");
s->use_linux_aio = false;
}
}
#endif
#ifdef CONFIG_LINUX_IO_URING
if (s->use_linux_io_uring) {
Error *local_err = NULL;
if (!aio_setup_linux_io_uring(new_context, &local_err)) {
error_reportf_err(local_err, "Unable to use linux io_uring, "
"falling back to thread pool: ");
s->use_linux_io_uring = false;
}
}
#endif
}
static void raw_close(BlockDriverState *bs)
{
BDRVRawState *s = bs->opaque;
@ -3896,7 +3899,6 @@ BlockDriver bdrv_file = {
.bdrv_co_copy_range_from = raw_co_copy_range_from,
.bdrv_co_copy_range_to = raw_co_copy_range_to,
.bdrv_refresh_limits = raw_refresh_limits,
.bdrv_attach_aio_context = raw_aio_attach_aio_context,
.bdrv_co_truncate = raw_co_truncate,
.bdrv_co_getlength = raw_co_getlength,
@ -4266,7 +4268,6 @@ static BlockDriver bdrv_host_device = {
.bdrv_co_copy_range_from = raw_co_copy_range_from,
.bdrv_co_copy_range_to = raw_co_copy_range_to,
.bdrv_refresh_limits = raw_refresh_limits,
.bdrv_attach_aio_context = raw_aio_attach_aio_context,
.bdrv_co_truncate = raw_co_truncate,
.bdrv_co_getlength = raw_co_getlength,
@ -4402,7 +4403,6 @@ static BlockDriver bdrv_host_cdrom = {
.bdrv_co_pwritev = raw_co_pwritev,
.bdrv_co_flush_to_disk = raw_co_flush_to_disk,
.bdrv_refresh_limits = cdrom_refresh_limits,
.bdrv_attach_aio_context = raw_aio_attach_aio_context,
.bdrv_co_truncate = raw_co_truncate,
.bdrv_co_getlength = raw_co_getlength,
@ -4528,7 +4528,6 @@ static BlockDriver bdrv_host_cdrom = {
.bdrv_co_pwritev = raw_co_pwritev,
.bdrv_co_flush_to_disk = raw_co_flush_to_disk,
.bdrv_refresh_limits = cdrom_refresh_limits,
.bdrv_attach_aio_context = raw_aio_attach_aio_context,
.bdrv_co_truncate = raw_co_truncate,
.bdrv_co_getlength = raw_co_getlength,

block/graph-lock.c

@ -106,27 +106,12 @@ static uint32_t reader_count(void)
return rd;
}
void no_coroutine_fn bdrv_graph_wrlock(BlockDriverState *bs)
void no_coroutine_fn bdrv_graph_wrlock(void)
{
AioContext *ctx = NULL;
GLOBAL_STATE_CODE();
assert(!qatomic_read(&has_writer));
assert(!qemu_in_coroutine());
/*
* Release only non-mainloop AioContext. The mainloop often relies on the
* BQL and doesn't lock the main AioContext before doing things.
*/
if (bs) {
ctx = bdrv_get_aio_context(bs);
if (ctx != qemu_get_aio_context()) {
aio_context_release(ctx);
} else {
ctx = NULL;
}
}
/* Make sure that constantly arriving new I/O doesn't cause starvation */
bdrv_drain_all_begin_nopoll();
@ -155,27 +140,13 @@ void no_coroutine_fn bdrv_graph_wrlock(BlockDriverState *bs)
} while (reader_count() >= 1);
bdrv_drain_all_end();
if (ctx) {
aio_context_acquire(bdrv_get_aio_context(bs));
}
}
void no_coroutine_fn bdrv_graph_wrunlock_ctx(AioContext *ctx)
void no_coroutine_fn bdrv_graph_wrunlock(void)
{
GLOBAL_STATE_CODE();
assert(qatomic_read(&has_writer));
/*
* Release only non-mainloop AioContext. The mainloop often relies on the
* BQL and doesn't lock the main AioContext before doing things.
*/
if (ctx && ctx != qemu_get_aio_context()) {
aio_context_release(ctx);
} else {
ctx = NULL;
}
WITH_QEMU_LOCK_GUARD(&aio_context_list_lock) {
/*
* No need for memory barriers, this works in pair with
@ -197,17 +168,6 @@ void no_coroutine_fn bdrv_graph_wrunlock_ctx(AioContext *ctx)
* progress.
*/
aio_bh_poll(qemu_get_aio_context());
if (ctx) {
aio_context_acquire(ctx);
}
}
void no_coroutine_fn bdrv_graph_wrunlock(BlockDriverState *bs)
{
AioContext *ctx = bs ? bdrv_get_aio_context(bs) : NULL;
bdrv_graph_wrunlock_ctx(ctx);
}
void coroutine_fn bdrv_graph_co_rdlock(void)
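These graph-lock simplifications are one piece of the broader removal of the AioContext lock in this release, which the surrounding hunks apply mechanically: call sites stop bracketing block-layer calls with aio_context_acquire()/release(). A condensed before/after sketch of the typical call-site change (illustrative, not taken from a single hunk):

/* Before: callers locked the node's AioContext. */
AioContext *ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
ret = bdrv_flush(bs);
aio_context_release(ctx);

/* After: no AioContext lock; correctness relies on the BQL, the graph
 * lock (e.g. GRAPH_RDLOCK_GUARD_MAINLOOP()) and drained sections. */
ret = bdrv_flush(bs);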

block/io.c

@ -294,8 +294,6 @@ static void bdrv_co_drain_bh_cb(void *opaque)
BlockDriverState *bs = data->bs;
if (bs) {
AioContext *ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
bdrv_dec_in_flight(bs);
if (data->begin) {
bdrv_do_drained_begin(bs, data->parent, data->poll);
@ -303,7 +301,6 @@ static void bdrv_co_drain_bh_cb(void *opaque)
assert(!data->poll);
bdrv_do_drained_end(bs, data->parent);
}
aio_context_release(ctx);
} else {
assert(data->begin);
bdrv_drain_all_begin();
@ -320,8 +317,6 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
{
BdrvCoDrainData data;
Coroutine *self = qemu_coroutine_self();
AioContext *ctx = bdrv_get_aio_context(bs);
AioContext *co_ctx = qemu_coroutine_get_aio_context(self);
/* Calling bdrv_drain() from a BH ensures the current coroutine yields and
* other coroutines run if they were queued by aio_co_enter(). */
@ -340,17 +335,6 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
bdrv_inc_in_flight(bs);
}
/*
* Temporarily drop the lock across yield or we would get deadlocks.
* bdrv_co_drain_bh_cb() reacquires the lock as needed.
*
* When we yield below, the lock for the current context will be
* released, so if this is actually the lock that protects bs, don't drop
* it a second time.
*/
if (ctx != co_ctx) {
aio_context_release(ctx);
}
replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
bdrv_co_drain_bh_cb, &data);
@ -358,11 +342,6 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
/* If we are resumed from some other event (such as an aio completion or a
* timer callback), it is a bug in the caller that should be fixed. */
assert(data.done);
/* Reacquire the AioContext of bs if we dropped it */
if (ctx != co_ctx) {
aio_context_acquire(ctx);
}
}
static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
@ -478,13 +457,12 @@ static bool bdrv_drain_all_poll(void)
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
/* bdrv_drain_poll() can't make changes to the graph and we are holding the
* main AioContext lock, so iterating bdrv_next_all_states() is safe. */
/*
* bdrv_drain_poll() can't make changes to the graph and we hold the BQL,
* so iterating bdrv_next_all_states() is safe.
*/
while ((bs = bdrv_next_all_states(bs))) {
AioContext *aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
result |= bdrv_drain_poll(bs, NULL, true);
aio_context_release(aio_context);
}
return result;
@ -525,11 +503,7 @@ void bdrv_drain_all_begin_nopoll(void)
/* Quiesce all nodes, without polling in-flight requests yet. The graph
* cannot change during this loop. */
while ((bs = bdrv_next_all_states(bs))) {
AioContext *aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
bdrv_do_drained_begin(bs, NULL, false);
aio_context_release(aio_context);
}
}
@ -588,11 +562,7 @@ void bdrv_drain_all_end(void)
}
while ((bs = bdrv_next_all_states(bs))) {
AioContext *aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
bdrv_do_drained_end(bs, NULL);
aio_context_release(aio_context);
}
assert(qemu_get_current_aio_context() == qemu_get_aio_context());
@ -1756,22 +1726,29 @@ static int bdrv_pad_request(BlockDriverState *bs,
return 0;
}
sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes,
&sliced_head, &sliced_tail,
&sliced_niov);
/*
* For prefetching in stream_populate(), no qiov is passed along, because
* only copy-on-read matters.
*/
if (*qiov) {
sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes,
&sliced_head, &sliced_tail,
&sliced_niov);
/* Guaranteed by bdrv_check_request32() */
assert(*bytes <= SIZE_MAX);
ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov,
sliced_head, *bytes);
if (ret < 0) {
bdrv_padding_finalize(pad);
return ret;
/* Guaranteed by bdrv_check_request32() */
assert(*bytes <= SIZE_MAX);
ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov,
sliced_head, *bytes);
if (ret < 0) {
bdrv_padding_finalize(pad);
return ret;
}
*qiov = &pad->local_qiov;
*qiov_offset = 0;
}
*bytes += pad->head + pad->tail;
*offset -= pad->head;
*qiov = &pad->local_qiov;
*qiov_offset = 0;
if (padded) {
*padded = true;
}
@ -2368,15 +2345,10 @@ int bdrv_flush_all(void)
}
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
AioContext *aio_context = bdrv_get_aio_context(bs);
int ret;
aio_context_acquire(aio_context);
ret = bdrv_flush(bs);
int ret = bdrv_flush(bs);
if (ret < 0 && !result) {
result = ret;
}
aio_context_release(aio_context);
}
return result;

block/io_uring.c

@ -49,7 +49,7 @@ typedef struct LuringQueue {
QSIMPLEQ_HEAD(, LuringAIOCB) submit_queue;
} LuringQueue;
typedef struct LuringState {
struct LuringState {
AioContext *aio_context;
struct io_uring ring;
@ -58,7 +58,7 @@ typedef struct LuringState {
LuringQueue io_q;
QEMUBH *completion_bh;
} LuringState;
};
/**
* luring_resubmit:
@ -102,7 +102,7 @@ static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb,
/* Update sqe */
luringcb->sqeq.off += nread;
luringcb->sqeq.addr = (__u64)(uintptr_t)luringcb->resubmit_qiov.iov;
luringcb->sqeq.addr = (uintptr_t)luringcb->resubmit_qiov.iov;
luringcb->sqeq.len = luringcb->resubmit_qiov.niov;
luring_resubmit(s, luringcb);
@ -432,7 +432,7 @@ LuringState *luring_init(Error **errp)
rc = io_uring_queue_init(MAX_ENTRIES, ring, 0);
if (rc < 0) {
error_setg_errno(errp, errno, "failed to init linux io_uring ring");
error_setg_errno(errp, -rc, "failed to init linux io_uring ring");
g_free(s);
return NULL;
}
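The error-reporting change above is a real fix: liburing's io_uring_queue_init() reports failure by returning a negated errno value and does not set errno itself, so the old code passed stale errno contents to error_setg_errno(). Negating the return value reports the actual cause:

rc = io_uring_queue_init(MAX_ENTRIES, ring, 0);
if (rc < 0) {
    /* rc holds -errno from liburing; errno itself is unreliable here */
    error_setg_errno(errp, -rc, "failed to init linux io_uring ring");
}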

block/meson.build

@ -88,10 +88,15 @@ if get_option('parallels').allowed()
block_ss.add(files('parallels.c', 'parallels-ext.c'))
endif
block_ss.add(when: 'CONFIG_WIN32', if_true: files('file-win32.c', 'win32-aio.c'))
block_ss.add(when: 'CONFIG_POSIX', if_true: [files('file-posix.c'), coref, iokit])
if host_os == 'windows'
block_ss.add(files('file-win32.c', 'win32-aio.c'))
else
block_ss.add(files('file-posix.c'), coref, iokit)
endif
block_ss.add(when: libiscsi, if_true: files('iscsi-opts.c'))
block_ss.add(when: 'CONFIG_LINUX', if_true: files('nvme.c'))
if host_os == 'linux'
block_ss.add(files('nvme.c'))
endif
if get_option('replication').allowed()
block_ss.add(files('replication.c'))
endif

block/mirror.c

@ -479,9 +479,9 @@ static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
return bytes_handled;
}
static void coroutine_fn GRAPH_RDLOCK mirror_iteration(MirrorBlockJob *s)
static void coroutine_fn GRAPH_UNLOCKED mirror_iteration(MirrorBlockJob *s)
{
BlockDriverState *source = s->mirror_top_bs->backing->bs;
BlockDriverState *source;
MirrorOp *pseudo_op;
int64_t offset;
/* At least the first dirty chunk is mirrored in one iteration. */
@ -489,6 +489,10 @@ static void coroutine_fn GRAPH_RDLOCK mirror_iteration(MirrorBlockJob *s)
bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);
bdrv_graph_co_rdlock();
source = s->mirror_top_bs->backing->bs;
bdrv_graph_co_rdunlock();
bdrv_dirty_bitmap_lock(s->dirty_bitmap);
offset = bdrv_dirty_iter_next(s->dbi);
if (offset < 0) {
@ -662,7 +666,6 @@ static int mirror_exit_common(Job *job)
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
BlockJob *bjob = &s->common;
MirrorBDSOpaque *bs_opaque;
AioContext *replace_aio_context = NULL;
BlockDriverState *src;
BlockDriverState *target_bs;
BlockDriverState *mirror_top_bs;
@ -677,7 +680,6 @@ static int mirror_exit_common(Job *job)
}
s->prepared = true;
aio_context_acquire(qemu_get_aio_context());
bdrv_graph_rdlock_main_loop();
mirror_top_bs = s->mirror_top_bs;
@ -742,11 +744,6 @@ static int mirror_exit_common(Job *job)
}
bdrv_graph_rdunlock_main_loop();
if (s->to_replace) {
replace_aio_context = bdrv_get_aio_context(s->to_replace);
aio_context_acquire(replace_aio_context);
}
if (s->should_complete && !abort) {
BlockDriverState *to_replace = s->to_replace ?: src;
bool ro = bdrv_is_read_only(to_replace);
@ -764,7 +761,7 @@ static int mirror_exit_common(Job *job)
* check for an op blocker on @to_replace, and we have our own
* there.
*/
bdrv_graph_wrlock(target_bs);
bdrv_graph_wrlock();
if (bdrv_recurse_can_replace(src, to_replace)) {
bdrv_replace_node(to_replace, target_bs, &local_err);
} else {
@ -773,7 +770,7 @@ static int mirror_exit_common(Job *job)
"would not lead to an abrupt change of visible data",
to_replace->node_name, target_bs->node_name);
}
bdrv_graph_wrunlock(target_bs);
bdrv_graph_wrunlock();
bdrv_drained_end(to_replace);
if (local_err) {
error_report_err(local_err);
@ -785,9 +782,6 @@ static int mirror_exit_common(Job *job)
error_free(s->replace_blocker);
bdrv_unref(s->to_replace);
}
if (replace_aio_context) {
aio_context_release(replace_aio_context);
}
g_free(s->replaces);
/*
@ -796,9 +790,9 @@ static int mirror_exit_common(Job *job)
* valid.
*/
block_job_remove_all_bdrv(bjob);
bdrv_graph_wrlock(mirror_top_bs);
bdrv_graph_wrlock();
bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
bdrv_graph_wrunlock(mirror_top_bs);
bdrv_graph_wrunlock();
bdrv_drained_end(target_bs);
bdrv_unref(target_bs);
@ -811,8 +805,6 @@ static int mirror_exit_common(Job *job)
bdrv_unref(mirror_top_bs);
bdrv_unref(src);
aio_context_release(qemu_get_aio_context());
return ret;
}
@ -1078,9 +1070,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
mirror_wait_for_free_in_flight_slot(s);
continue;
} else if (cnt != 0) {
bdrv_graph_co_rdlock();
mirror_iteration(s);
bdrv_graph_co_rdunlock();
}
}
@ -1191,24 +1181,17 @@ static void mirror_complete(Job *job, Error **errp)
/* block all operations on to_replace bs */
if (s->replaces) {
AioContext *replace_aio_context;
s->to_replace = bdrv_find_node(s->replaces);
if (!s->to_replace) {
error_setg(errp, "Node name '%s' not found", s->replaces);
return;
}
replace_aio_context = bdrv_get_aio_context(s->to_replace);
aio_context_acquire(replace_aio_context);
/* TODO Translate this into child freeze system. */
error_setg(&s->replace_blocker,
"block device is in use by block-job-complete");
bdrv_op_block_all(s->to_replace, s->replace_blocker);
bdrv_ref(s->to_replace);
aio_context_release(replace_aio_context);
}
s->should_complete = true;
@ -1914,13 +1897,13 @@ static BlockJob *mirror_start_job(
*/
bdrv_disable_dirty_bitmap(s->dirty_bitmap);
bdrv_graph_wrlock(bs);
bdrv_graph_wrlock();
ret = block_job_add_bdrv(&s->common, "source", bs, 0,
BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
BLK_PERM_CONSISTENT_READ,
errp);
if (ret < 0) {
bdrv_graph_wrunlock(bs);
bdrv_graph_wrunlock();
goto fail;
}
@ -1965,17 +1948,17 @@ static BlockJob *mirror_start_job(
ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
iter_shared_perms, errp);
if (ret < 0) {
bdrv_graph_wrunlock(bs);
bdrv_graph_wrunlock();
goto fail;
}
}
if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
bdrv_graph_wrunlock(bs);
bdrv_graph_wrunlock();
goto fail;
}
}
bdrv_graph_wrunlock(bs);
bdrv_graph_wrunlock();
QTAILQ_INIT(&s->ops_in_flight);
@ -2001,12 +1984,12 @@ fail:
bs_opaque->stop = true;
bdrv_drained_begin(bs);
bdrv_graph_wrlock(bs);
bdrv_graph_wrlock();
assert(mirror_top_bs->backing->bs == bs);
bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
&error_abort);
bdrv_replace_node(mirror_top_bs, bs, &error_abort);
bdrv_graph_wrunlock(bs);
bdrv_graph_wrunlock();
bdrv_drained_end(bs);
bdrv_unref(mirror_top_bs);

block/monitor/bitmap-qmp-cmds.c

@ -95,7 +95,6 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name,
{
BlockDriverState *bs;
BdrvDirtyBitmap *bitmap;
AioContext *aio_context;
if (!name || name[0] == '\0') {
error_setg(errp, "Bitmap name cannot be empty");
@ -107,14 +106,11 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name,
return;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
if (has_granularity) {
if (granularity < 512 || !is_power_of_2(granularity)) {
error_setg(errp, "Granularity must be power of 2 "
"and at least 512");
goto out;
return;
}
} else {
/* Default to cluster size, if available: */
@ -132,12 +128,12 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name,
if (persistent &&
!bdrv_can_store_new_dirty_bitmap(bs, name, granularity, errp))
{
goto out;
return;
}
bitmap = bdrv_create_dirty_bitmap(bs, granularity, name, errp);
if (bitmap == NULL) {
goto out;
return;
}
if (disabled) {
@ -145,9 +141,6 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name,
}
bdrv_dirty_bitmap_set_persistence(bitmap, persistent);
out:
aio_context_release(aio_context);
}
BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name,
@ -157,7 +150,6 @@ BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name,
{
BlockDriverState *bs;
BdrvDirtyBitmap *bitmap;
AioContext *aio_context;
GLOBAL_STATE_CODE();
@ -166,19 +158,14 @@ BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name,
return NULL;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
if (bdrv_dirty_bitmap_check(bitmap, BDRV_BITMAP_BUSY | BDRV_BITMAP_RO,
errp)) {
aio_context_release(aio_context);
return NULL;
}
if (bdrv_dirty_bitmap_get_persistence(bitmap) &&
bdrv_remove_persistent_dirty_bitmap(bs, name, errp) < 0)
{
aio_context_release(aio_context);
return NULL;
}
@ -190,7 +177,6 @@ BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name,
*bitmap_bs = bs;
}
aio_context_release(aio_context);
return release ? NULL : bitmap;
}
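
The bitmap handlers above illustrate the other big theme of this update: the AioContext lock is gone, so the aio_context_acquire()/aio_context_release() brackets around block-layer calls are dropped, and the goto-out cleanup paths they forced collapse into plain returns. Schematically (hypothetical handler, sketch only):

    /* Before: the held lock forced a single exit label. */
    aio_context_acquire(ctx);
    if (!arg_is_valid) {
        goto out;               /* must not leave with the lock held */
    }
    do_work(bs);
    out:
    aio_context_release(ctx);

    /* After: nothing to release, so error paths return directly. */
    if (!arg_is_valid) {
        return;
    }
    do_work(bs);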


@ -141,7 +141,6 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict)
const char *id = qdict_get_str(qdict, "id");
BlockBackend *blk;
BlockDriverState *bs;
AioContext *aio_context;
Error *local_err = NULL;
GLOBAL_STATE_CODE();
@ -168,14 +167,10 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict)
return;
}
aio_context = blk_get_aio_context(blk);
aio_context_acquire(aio_context);
bs = blk_bs(blk);
if (bs) {
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, &local_err)) {
error_report_err(local_err);
aio_context_release(aio_context);
return;
}
@ -196,8 +191,6 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict)
} else {
blk_unref(blk);
}
aio_context_release(aio_context);
}
void hmp_commit(Monitor *mon, const QDict *qdict)
@ -213,7 +206,6 @@ void hmp_commit(Monitor *mon, const QDict *qdict)
ret = blk_commit_all();
} else {
BlockDriverState *bs;
AioContext *aio_context;
blk = blk_by_name(device);
if (!blk) {
@ -222,18 +214,13 @@ void hmp_commit(Monitor *mon, const QDict *qdict)
}
bs = bdrv_skip_implicit_filters(blk_bs(blk));
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
if (!blk_is_available(blk)) {
error_report("Device '%s' has no medium", device);
aio_context_release(aio_context);
return;
}
ret = bdrv_commit(bs);
aio_context_release(aio_context);
}
if (ret < 0) {
error_report("'commit' error for '%s': %s", device, strerror(-ret));
@ -509,7 +496,7 @@ void hmp_block_stream(Monitor *mon, const QDict *qdict)
const char *base = qdict_get_try_str(qdict, "base");
int64_t speed = qdict_get_try_int(qdict, "speed", 0);
qmp_block_stream(device, device, base, NULL, NULL, NULL,
qmp_block_stream(device, device, base, NULL, NULL, false, false, NULL,
qdict_haskey(qdict, "speed"), speed,
true, BLOCKDEV_ON_ERROR_REPORT, NULL,
false, false, false, false, &error);
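
The extra false, false pair in this call reflects a new optional boolean argument to qmp_block_stream() (by QAPI convention, an optional parameter appears as a has_x/x pair in generated C); HMP leaves the option absent. Judging by the stream changes later in this diff, this is the new backing-mask-protocol option, so a caller opting in might look roughly like this (argument names assumed, sketch only):

    /* Hypothetical caller that enables backing-mask-protocol. */
    qmp_block_stream(device, device, base, NULL, NULL,
                     true, true,    /* has_x / x for the new option */
                     NULL,
                     qdict_haskey(qdict, "speed"), speed,
                     true, BLOCKDEV_ON_ERROR_REPORT, NULL,
                     false, false, false, false, &error);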
@ -560,7 +547,6 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict)
BlockBackend *blk = NULL;
BlockDriverState *bs = NULL;
BlockBackend *local_blk = NULL;
AioContext *ctx = NULL;
bool qdev = qdict_get_try_bool(qdict, "qdev", false);
const char *device = qdict_get_str(qdict, "device");
const char *command = qdict_get_str(qdict, "command");
@ -582,9 +568,6 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict)
}
}
ctx = blk ? blk_get_aio_context(blk) : bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
if (bs) {
blk = local_blk = blk_new(bdrv_get_aio_context(bs), 0, BLK_PERM_ALL);
ret = blk_insert_bs(blk, bs, &err);
@ -622,11 +605,6 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict)
fail:
blk_unref(local_blk);
if (ctx) {
aio_context_release(ctx);
}
hmp_handle_error(mon, err);
}
@ -882,7 +860,6 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
int nb_sns, i;
int total;
int *global_snapshots;
AioContext *aio_context;
typedef struct SnapshotEntry {
QEMUSnapshotInfo sn;
@ -909,11 +886,8 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
error_report_err(err);
return;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
nb_sns = bdrv_snapshot_list(bs, &sn_tab);
aio_context_release(aio_context);
if (nb_sns < 0) {
monitor_printf(mon, "bdrv_snapshot_list: error %d\n", nb_sns);
@ -924,9 +898,7 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
int bs1_nb_sns = 0;
ImageEntry *ie;
SnapshotEntry *se;
AioContext *ctx = bdrv_get_aio_context(bs1);
aio_context_acquire(ctx);
if (bdrv_can_snapshot(bs1)) {
sn = NULL;
bs1_nb_sns = bdrv_snapshot_list(bs1, &sn);
@ -944,7 +916,6 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
}
g_free(sn);
}
aio_context_release(ctx);
}
if (no_snapshot) {


@ -852,6 +852,7 @@ static coroutine_fn int nbd_co_do_receive_one_chunk(
BDRVNBDState *s, uint64_t cookie, bool only_structured,
int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
ERRP_GUARD();
int ret;
int i = COOKIE_TO_INDEX(cookie);
void *local_payload = NULL;


@ -168,6 +168,7 @@ static QemuOptsList runtime_opts = {
static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
unsigned nentries, size_t entry_bytes, Error **errp)
{
ERRP_GUARD();
size_t bytes;
int r;
@ -221,6 +222,7 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
unsigned idx, size_t size,
Error **errp)
{
ERRP_GUARD();
int i, r;
NVMeQueuePair *q;
uint64_t prp_list_iova;
@ -535,6 +537,7 @@ static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
/* Returns true on success, false on failure. */
static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
ERRP_GUARD();
BDRVNVMeState *s = bs->opaque;
bool ret = false;
QEMU_AUTO_VFREE union {
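
The ERRP_GUARD() lines added in the NBD and NVMe drivers (and in several files below) are QEMU's standard idiom for functions that dereference *errp or prepend to it: the macro redirects errp to a local error object, which makes such use safe even when the caller passed NULL or error_fatal. A small sketch of the pattern it enables (names hypothetical):

    #include "qapi/error.h"

    static bool init_thing(Thing *t, Error **errp)
    {
        ERRP_GUARD();

        do_step(t, errp);
        if (*errp) {            /* safe to test only after ERRP_GUARD() */
            error_prepend(errp, "thing setup failed: ");
            return false;
        }
        return true;
    }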


@ -174,7 +174,6 @@ blockdev_remove_medium(const char *device, const char *id, Error **errp)
{
BlockBackend *blk;
BlockDriverState *bs;
AioContext *aio_context;
bool has_attached_device;
GLOBAL_STATE_CODE();
@ -204,13 +203,10 @@ blockdev_remove_medium(const char *device, const char *id, Error **errp)
return;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
bdrv_graph_rdlock_main_loop();
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) {
bdrv_graph_rdunlock_main_loop();
goto out;
return;
}
bdrv_graph_rdunlock_main_loop();
@ -223,9 +219,6 @@ blockdev_remove_medium(const char *device, const char *id, Error **errp)
* value passed here (i.e. false). */
blk_dev_change_media_cb(blk, false, &error_abort);
}
out:
aio_context_release(aio_context);
}
void qmp_blockdev_remove_medium(const char *id, Error **errp)
@ -237,7 +230,6 @@ static void qmp_blockdev_insert_anon_medium(BlockBackend *blk,
BlockDriverState *bs, Error **errp)
{
Error *local_err = NULL;
AioContext *ctx;
bool has_device;
int ret;
@ -259,11 +251,7 @@ static void qmp_blockdev_insert_anon_medium(BlockBackend *blk,
return;
}
ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
ret = blk_insert_bs(blk, bs, errp);
aio_context_release(ctx);
if (ret < 0) {
return;
}
@ -374,9 +362,7 @@ void qmp_blockdev_change_medium(const char *device,
qdict_put_str(options, "driver", format);
}
aio_context_acquire(qemu_get_aio_context());
medium_bs = bdrv_open(filename, NULL, options, bdrv_flags, errp);
aio_context_release(qemu_get_aio_context());
if (!medium_bs) {
goto fail;
@ -437,20 +423,16 @@ void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp)
ThrottleConfig cfg;
BlockDriverState *bs;
BlockBackend *blk;
AioContext *aio_context;
blk = qmp_get_blk(arg->device, arg->id, errp);
if (!blk) {
return;
}
aio_context = blk_get_aio_context(blk);
aio_context_acquire(aio_context);
bs = blk_bs(blk);
if (!bs) {
error_setg(errp, "Device has no medium");
goto out;
return;
}
throttle_config_init(&cfg);
@ -505,7 +487,7 @@ void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp)
}
if (!throttle_is_valid(&cfg, errp)) {
goto out;
return;
}
if (throttle_enabled(&cfg)) {
@ -522,9 +504,6 @@ void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp)
/* If all throttling settings are set to 0, disable I/O limits */
blk_io_limits_disable(blk);
}
out:
aio_context_release(aio_context);
}
void qmp_block_latency_histogram_set(


@ -46,11 +46,11 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
bool flat,
Error **errp)
{
ERRP_GUARD();
ImageInfo **p_image_info;
ImageInfo *backing_info;
BlockDriverState *backing;
BlockDeviceInfo *info;
ERRP_GUARD();
if (!bs->drv) {
error_setg(errp, "Block device %s is ejected", bs->node_name);
@ -234,13 +234,11 @@ bdrv_do_query_node_info(BlockDriverState *bs, BlockNodeInfo *info, Error **errp)
int ret;
Error *err = NULL;
aio_context_acquire(bdrv_get_aio_context(bs));
size = bdrv_getlength(bs);
if (size < 0) {
error_setg_errno(errp, -size, "Can't get image size '%s'",
bs->exact_filename);
goto out;
return;
}
bdrv_refresh_filename(bs);
@ -265,7 +263,7 @@ bdrv_do_query_node_info(BlockDriverState *bs, BlockNodeInfo *info, Error **errp)
info->format_specific = bdrv_get_specific_info(bs, &err);
if (err) {
error_propagate(errp, err);
goto out;
return;
}
backing_filename = bs->backing_file;
if (backing_filename[0] != '\0') {
@ -300,11 +298,8 @@ bdrv_do_query_node_info(BlockDriverState *bs, BlockNodeInfo *info, Error **errp)
break;
default:
error_propagate(errp, err);
goto out;
return;
}
out:
aio_context_release(bdrv_get_aio_context(bs));
}
/**
@ -335,8 +330,8 @@ void bdrv_query_image_info(BlockDriverState *bs,
bool skip_implicit_filters,
Error **errp)
{
ImageInfo *info;
ERRP_GUARD();
ImageInfo *info;
info = g_new0(ImageInfo, 1);
bdrv_do_query_node_info(bs, qapi_ImageInfo_base(info), errp);
@ -387,10 +382,10 @@ void bdrv_query_block_graph_info(BlockDriverState *bs,
BlockGraphInfo **p_info,
Error **errp)
{
ERRP_GUARD();
BlockGraphInfo *info;
BlockChildInfoList **children_list_tail;
BdrvChild *c;
ERRP_GUARD();
info = g_new0(BlockGraphInfo, 1);
bdrv_do_query_node_info(bs, qapi_BlockGraphInfo_base(info), errp);
@ -709,15 +704,10 @@ BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
/* Just to be safe if query_nodes is not always initialized */
if (has_query_nodes && query_nodes) {
for (bs = bdrv_next_node(NULL); bs; bs = bdrv_next_node(bs)) {
AioContext *ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
QAPI_LIST_APPEND(tail, bdrv_query_bds_stats(bs, false));
aio_context_release(ctx);
}
} else {
for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
AioContext *ctx = blk_get_aio_context(blk);
BlockStats *s;
char *qdev;
@ -725,7 +715,6 @@ BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
continue;
}
aio_context_acquire(ctx);
s = bdrv_query_bds_stats(blk_bs(blk), true);
s->device = g_strdup(blk_name(blk));
@ -737,7 +726,6 @@ BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
}
bdrv_query_blk_stats(s->stats, blk);
aio_context_release(ctx);
QAPI_LIST_APPEND(tail, s);
}
@ -754,15 +742,15 @@ void bdrv_snapshot_dump(QEMUSnapshotInfo *sn)
char *sizing = NULL;
if (!sn) {
qemu_printf("%-10s%-17s%8s%20s%13s%11s",
"ID", "TAG", "VM SIZE", "DATE", "VM CLOCK", "ICOUNT");
qemu_printf("%-7s %-16s %8s %19s %15s %10s",
"ID", "TAG", "VM_SIZE", "DATE", "VM_CLOCK", "ICOUNT");
} else {
g_autoptr(GDateTime) date = g_date_time_new_from_unix_local(sn->date_sec);
g_autofree char *date_buf = g_date_time_format(date, "%Y-%m-%d %H:%M:%S");
secs = sn->vm_clock_nsec / 1000000000;
snprintf(clock_buf, sizeof(clock_buf),
"%02d:%02d:%02d.%03d",
"%04d:%02d:%02d.%03d",
(int)(secs / 3600),
(int)((secs / 60) % 60),
(int)(secs % 60),
@ -771,8 +759,10 @@ void bdrv_snapshot_dump(QEMUSnapshotInfo *sn)
if (sn->icount != -1ULL) {
snprintf(icount_buf, sizeof(icount_buf),
"%"PRId64, sn->icount);
} else {
snprintf(icount_buf, sizeof(icount_buf), "--");
}
qemu_printf("%-9s %-16s %8s%20s%13s%11s",
qemu_printf("%-7s %-16s %8s %19s %15s %10s",
sn->id_str, sn->name,
sizing,
date_buf,


@ -885,7 +885,7 @@ qcow_co_create(BlockdevCreateOptions *opts, Error **errp)
header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
crypto = qcrypto_block_create(qcow_opts->encrypt, "encrypt.",
NULL, NULL, NULL, errp);
NULL, NULL, NULL, 0, errp);
if (!crypto) {
ret = -EINVAL;
goto exit;
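
qcrypto_block_create() gained a flags argument just before errp; existing callers such as this one pass 0 to keep the previous behaviour. Annotated form of the updated call from this hunk:

    crypto = qcrypto_block_create(qcow_opts->encrypt, "encrypt.",
                                  NULL, NULL, NULL, /* no header callbacks */
                                  0,                /* new flags argument */
                                  errp);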


@ -1710,6 +1710,7 @@ bool coroutine_fn qcow2_co_can_store_new_dirty_bitmap(BlockDriverState *bs,
uint32_t granularity,
Error **errp)
{
ERRP_GUARD();
BDRVQcow2State *s = bs->opaque;
BdrvDirtyBitmap *bitmap;
uint64_t bitmap_directory_size = 0;


@ -2807,9 +2807,9 @@ qcow2_do_close(BlockDriverState *bs, bool close_data_file)
if (close_data_file && has_data_file(bs)) {
GLOBAL_STATE_CODE();
bdrv_graph_rdunlock_main_loop();
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_unref_child(bs, s->data_file);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
s->data_file = NULL;
bdrv_graph_rdlock_main_loop();
}
@ -3216,7 +3216,7 @@ qcow2_set_up_encryption(BlockDriverState *bs,
crypto = qcrypto_block_create(cryptoopts, "encrypt.",
qcow2_crypto_hdr_init_func,
qcow2_crypto_hdr_write_func,
bs, errp);
bs, 0, errp);
if (!crypto) {
return -EINVAL;
}
@ -3483,6 +3483,7 @@ static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version,
static int coroutine_fn GRAPH_UNLOCKED
qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
{
ERRP_GUARD();
BlockdevCreateOptionsQcow2 *qcow2_opts;
QDict *options;
@ -4283,6 +4284,7 @@ static int coroutine_fn GRAPH_RDLOCK
qcow2_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
PreallocMode prealloc, BdrvRequestFlags flags, Error **errp)
{
ERRP_GUARD();
BDRVQcow2State *s = bs->opaque;
uint64_t old_length;
int64_t new_l1_size;


@ -1579,6 +1579,7 @@ bdrv_qed_co_change_backing_file(BlockDriverState *bs, const char *backing_file,
static void coroutine_fn GRAPH_RDLOCK
bdrv_qed_co_invalidate_cache(BlockDriverState *bs, Error **errp)
{
ERRP_GUARD();
BDRVQEDState *s = bs->opaque;
int ret;


@ -1037,14 +1037,14 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags,
close_exit:
/* cleanup on error */
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
for (i = 0; i < s->num_children; i++) {
if (!opened[i]) {
continue;
}
bdrv_unref_child(bs, s->children[i]);
}
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
g_free(s->children);
g_free(opened);
exit:
@ -1057,11 +1057,11 @@ static void quorum_close(BlockDriverState *bs)
BDRVQuorumState *s = bs->opaque;
int i;
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
for (i = 0; i < s->num_children; i++) {
bdrv_unref_child(bs, s->children[i]);
}
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
g_free(s->children);
}


@ -470,7 +470,6 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVRawState *s = bs->opaque;
AioContext *ctx;
bool has_size;
uint64_t offset, size;
BdrvChildRole file_role;
@ -522,11 +521,7 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags,
bs->file->bs->filename);
}
ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
ret = raw_apply_options(bs, s, offset, has_size, size, errp);
aio_context_release(ctx);
if (ret < 0) {
return ret;
}


@ -394,14 +394,7 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable,
}
if (reopen_queue) {
AioContext *ctx = bdrv_get_aio_context(bs);
if (ctx != qemu_get_aio_context()) {
aio_context_release(ctx);
}
bdrv_reopen_multiple(reopen_queue, errp);
if (ctx != qemu_get_aio_context()) {
aio_context_acquire(ctx);
}
}
}
@ -462,14 +455,11 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
BlockDriverState *top_bs;
BdrvChild *active_disk, *hidden_disk, *secondary_disk;
int64_t active_length, hidden_length, disk_length;
AioContext *aio_context;
Error *local_err = NULL;
BackupPerf perf = { .use_copy_range = true, .max_workers = 1 };
GLOBAL_STATE_CODE();
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
s = bs->opaque;
if (s->stage == BLOCK_REPLICATION_DONE ||
@ -479,20 +469,17 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
* Ignore the request because the secondary side of replication
* doesn't have to do anything anymore.
*/
aio_context_release(aio_context);
return;
}
if (s->stage != BLOCK_REPLICATION_NONE) {
error_setg(errp, "Block replication is running or done");
aio_context_release(aio_context);
return;
}
if (s->mode != mode) {
error_setg(errp, "The parameter mode's value is invalid, needs %d,"
" but got %d", s->mode, mode);
aio_context_release(aio_context);
return;
}
@ -505,7 +492,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
if (!active_disk || !active_disk->bs || !active_disk->bs->backing) {
error_setg(errp, "Active disk doesn't have backing file");
bdrv_graph_rdunlock_main_loop();
aio_context_release(aio_context);
return;
}
@ -513,7 +499,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
if (!hidden_disk->bs || !hidden_disk->bs->backing) {
error_setg(errp, "Hidden disk doesn't have backing file");
bdrv_graph_rdunlock_main_loop();
aio_context_release(aio_context);
return;
}
@ -521,7 +506,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
if (!secondary_disk->bs || !bdrv_has_blk(secondary_disk->bs)) {
error_setg(errp, "The secondary disk doesn't have block backend");
bdrv_graph_rdunlock_main_loop();
aio_context_release(aio_context);
return;
}
bdrv_graph_rdunlock_main_loop();
@ -534,7 +518,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
active_length != hidden_length || hidden_length != disk_length) {
error_setg(errp, "Active disk, hidden disk, secondary disk's length"
" are not the same");
aio_context_release(aio_context);
return;
}
@ -546,7 +529,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
!hidden_disk->bs->drv->bdrv_make_empty) {
error_setg(errp,
"Active disk or hidden disk doesn't support make_empty");
aio_context_release(aio_context);
bdrv_graph_rdunlock_main_loop();
return;
}
@ -556,11 +538,10 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
reopen_backing_file(bs, true, &local_err);
if (local_err) {
error_propagate(errp, local_err);
aio_context_release(aio_context);
return;
}
bdrv_graph_wrlock(bs);
bdrv_graph_wrlock();
bdrv_ref(hidden_disk->bs);
s->hidden_disk = bdrv_attach_child(bs, hidden_disk->bs, "hidden disk",
@ -568,8 +549,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
&local_err);
if (local_err) {
error_propagate(errp, local_err);
bdrv_graph_wrunlock(bs);
aio_context_release(aio_context);
bdrv_graph_wrunlock();
return;
}
@ -579,8 +559,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
BDRV_CHILD_DATA, &local_err);
if (local_err) {
error_propagate(errp, local_err);
bdrv_graph_wrunlock(bs);
aio_context_release(aio_context);
bdrv_graph_wrunlock();
return;
}
@ -592,15 +571,14 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
if (!top_bs || !bdrv_is_root_node(top_bs) ||
!check_top_bs(top_bs, bs)) {
error_setg(errp, "No top_bs or it is invalid");
bdrv_graph_wrunlock(bs);
bdrv_graph_wrunlock();
reopen_backing_file(bs, false, NULL);
aio_context_release(aio_context);
return;
}
bdrv_op_block_all(top_bs, s->blocker);
bdrv_op_unblock(top_bs, BLOCK_OP_TYPE_DATAPLANE, s->blocker);
bdrv_graph_wrunlock(bs);
bdrv_graph_wrunlock();
s->backup_job = backup_job_create(
NULL, s->secondary_disk->bs, s->hidden_disk->bs,
@ -612,13 +590,11 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
if (local_err) {
error_propagate(errp, local_err);
backup_job_cleanup(bs);
aio_context_release(aio_context);
return;
}
job_start(&s->backup_job->job);
break;
default:
aio_context_release(aio_context);
abort();
}
@ -629,18 +605,12 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
}
s->error = 0;
aio_context_release(aio_context);
}
static void replication_do_checkpoint(ReplicationState *rs, Error **errp)
{
BlockDriverState *bs = rs->opaque;
BDRVReplicationState *s;
AioContext *aio_context;
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
s = bs->opaque;
BDRVReplicationState *s = bs->opaque;
if (s->stage == BLOCK_REPLICATION_DONE ||
s->stage == BLOCK_REPLICATION_FAILOVER) {
@ -649,38 +619,28 @@ static void replication_do_checkpoint(ReplicationState *rs, Error **errp)
* Ignore the request because the secondary side of replication
* doesn't have to do anything anymore.
*/
aio_context_release(aio_context);
return;
}
if (s->mode == REPLICATION_MODE_SECONDARY) {
secondary_do_checkpoint(bs, errp);
}
aio_context_release(aio_context);
}
static void replication_get_error(ReplicationState *rs, Error **errp)
{
BlockDriverState *bs = rs->opaque;
BDRVReplicationState *s;
AioContext *aio_context;
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
s = bs->opaque;
BDRVReplicationState *s = bs->opaque;
if (s->stage == BLOCK_REPLICATION_NONE) {
error_setg(errp, "Block replication is not running");
aio_context_release(aio_context);
return;
}
if (s->error) {
error_setg(errp, "I/O error occurred");
aio_context_release(aio_context);
return;
}
aio_context_release(aio_context);
}
static void replication_done(void *opaque, int ret)
@ -691,12 +651,12 @@ static void replication_done(void *opaque, int ret)
if (ret == 0) {
s->stage = BLOCK_REPLICATION_DONE;
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_unref_child(bs, s->secondary_disk);
s->secondary_disk = NULL;
bdrv_unref_child(bs, s->hidden_disk);
s->hidden_disk = NULL;
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
s->error = 0;
} else {
@ -708,12 +668,7 @@ static void replication_done(void *opaque, int ret)
static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
{
BlockDriverState *bs = rs->opaque;
BDRVReplicationState *s;
AioContext *aio_context;
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
s = bs->opaque;
BDRVReplicationState *s = bs->opaque;
if (s->stage == BLOCK_REPLICATION_DONE ||
s->stage == BLOCK_REPLICATION_FAILOVER) {
@ -722,13 +677,11 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
* Ignore the request because the secondary side of replication
* doesn't have to do anything anymore.
*/
aio_context_release(aio_context);
return;
}
if (s->stage != BLOCK_REPLICATION_RUNNING) {
error_setg(errp, "Block replication is not running");
aio_context_release(aio_context);
return;
}
@ -744,15 +697,12 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
* disk, secondary disk in backup_job_completed().
*/
if (s->backup_job) {
aio_context_release(aio_context);
job_cancel_sync(&s->backup_job->job, true);
aio_context_acquire(aio_context);
}
if (!failover) {
secondary_do_checkpoint(bs, errp);
s->stage = BLOCK_REPLICATION_DONE;
aio_context_release(aio_context);
return;
}
@ -765,10 +715,8 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
bdrv_graph_rdunlock_main_loop();
break;
default:
aio_context_release(aio_context);
abort();
}
aio_context_release(aio_context);
}
static const char *const replication_strong_runtime_opts[] = {


@ -292,9 +292,9 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
}
/* .bdrv_open() will re-attach it */
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_unref_child(bs, fallback);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
ret = bdrv_snapshot_goto(fallback_bs, snapshot_id, errp);
open_ret = drv->bdrv_open(bs, options, bs->open_flags, &local_err);
@ -527,9 +527,7 @@ static bool GRAPH_RDLOCK bdrv_all_snapshots_includes_bs(BlockDriverState *bs)
return bdrv_has_blk(bs) || QLIST_EMPTY(&bs->parents);
}
/* Group operations. All block drivers are involved.
* These functions will properly handle dataplane (take aio_context_acquire
* when appropriate for appropriate block drivers) */
/* Group operations. All block drivers are involved. */
bool bdrv_all_can_snapshot(bool has_devices, strList *devices,
Error **errp)
@ -547,14 +545,11 @@ bool bdrv_all_can_snapshot(bool has_devices, strList *devices,
iterbdrvs = bdrvs;
while (iterbdrvs) {
BlockDriverState *bs = iterbdrvs->data;
AioContext *ctx = bdrv_get_aio_context(bs);
bool ok = true;
aio_context_acquire(ctx);
if (devices || bdrv_all_snapshots_includes_bs(bs)) {
ok = bdrv_can_snapshot(bs);
}
aio_context_release(ctx);
if (!ok) {
error_setg(errp, "Device '%s' is writable but does not support "
"snapshots", bdrv_get_device_or_node_name(bs));
@ -571,6 +566,7 @@ int bdrv_all_delete_snapshot(const char *name,
bool has_devices, strList *devices,
Error **errp)
{
ERRP_GUARD();
g_autoptr(GList) bdrvs = NULL;
GList *iterbdrvs;
@ -584,18 +580,15 @@ int bdrv_all_delete_snapshot(const char *name,
iterbdrvs = bdrvs;
while (iterbdrvs) {
BlockDriverState *bs = iterbdrvs->data;
AioContext *ctx = bdrv_get_aio_context(bs);
QEMUSnapshotInfo sn1, *snapshot = &sn1;
int ret = 0;
aio_context_acquire(ctx);
if ((devices || bdrv_all_snapshots_includes_bs(bs)) &&
bdrv_snapshot_find(bs, snapshot, name) >= 0)
{
ret = bdrv_snapshot_delete(bs, snapshot->id_str,
snapshot->name, errp);
}
aio_context_release(ctx);
if (ret < 0) {
error_prepend(errp, "Could not delete snapshot '%s' on '%s': ",
name, bdrv_get_device_or_node_name(bs));
@ -613,6 +606,7 @@ int bdrv_all_goto_snapshot(const char *name,
bool has_devices, strList *devices,
Error **errp)
{
ERRP_GUARD();
g_autoptr(GList) bdrvs = NULL;
GList *iterbdrvs;
int ret;
@ -630,17 +624,14 @@ int bdrv_all_goto_snapshot(const char *name,
iterbdrvs = bdrvs;
while (iterbdrvs) {
BlockDriverState *bs = iterbdrvs->data;
AioContext *ctx = bdrv_get_aio_context(bs);
bool all_snapshots_includes_bs;
aio_context_acquire(ctx);
bdrv_graph_rdlock_main_loop();
all_snapshots_includes_bs = bdrv_all_snapshots_includes_bs(bs);
bdrv_graph_rdunlock_main_loop();
ret = (devices || all_snapshots_includes_bs) ?
bdrv_snapshot_goto(bs, name, errp) : 0;
aio_context_release(ctx);
if (ret < 0) {
bdrv_graph_rdlock_main_loop();
error_prepend(errp, "Could not load snapshot '%s' on '%s': ",
@ -672,15 +663,12 @@ int bdrv_all_has_snapshot(const char *name,
iterbdrvs = bdrvs;
while (iterbdrvs) {
BlockDriverState *bs = iterbdrvs->data;
AioContext *ctx = bdrv_get_aio_context(bs);
QEMUSnapshotInfo sn;
int ret = 0;
aio_context_acquire(ctx);
if (devices || bdrv_all_snapshots_includes_bs(bs)) {
ret = bdrv_snapshot_find(bs, &sn, name);
}
aio_context_release(ctx);
if (ret < 0) {
if (ret == -ENOENT) {
return 0;
@ -717,10 +705,8 @@ int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn,
iterbdrvs = bdrvs;
while (iterbdrvs) {
BlockDriverState *bs = iterbdrvs->data;
AioContext *ctx = bdrv_get_aio_context(bs);
int ret = 0;
aio_context_acquire(ctx);
if (bs == vm_state_bs) {
sn->vm_state_size = vm_state_size;
ret = bdrv_snapshot_create(bs, sn);
@ -728,7 +714,6 @@ int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn,
sn->vm_state_size = 0;
ret = bdrv_snapshot_create(bs, sn);
}
aio_context_release(ctx);
if (ret < 0) {
error_setg(errp, "Could not create snapshot '%s' on '%s'",
sn->name, bdrv_get_device_or_node_name(bs));
@ -759,13 +744,10 @@ BlockDriverState *bdrv_all_find_vmstate_bs(const char *vmstate_bs,
iterbdrvs = bdrvs;
while (iterbdrvs) {
BlockDriverState *bs = iterbdrvs->data;
AioContext *ctx = bdrv_get_aio_context(bs);
bool found = false;
aio_context_acquire(ctx);
found = (devices || bdrv_all_snapshots_includes_bs(bs)) &&
bdrv_can_snapshot(bs);
aio_context_release(ctx);
if (vmstate_bs) {
if (g_str_equal(vmstate_bs,


@ -39,6 +39,7 @@ typedef struct StreamBlockJob {
BlockDriverState *target_bs;
BlockdevOnError on_error;
char *backing_file_str;
bool backing_mask_protocol;
bool bs_read_only;
} StreamBlockJob;
@ -95,13 +96,18 @@ static int stream_prepare(Job *job)
if (unfiltered_base) {
base_id = s->backing_file_str ?: unfiltered_base->filename;
if (unfiltered_base->drv) {
base_fmt = unfiltered_base->drv->format_name;
if (s->backing_mask_protocol &&
unfiltered_base->drv->protocol_name) {
base_fmt = "raw";
} else {
base_fmt = unfiltered_base->drv->format_name;
}
}
}
bdrv_graph_wrlock(s->target_bs);
bdrv_graph_wrlock();
bdrv_set_backing_hd_drained(unfiltered_bs, base, &local_err);
bdrv_graph_wrunlock(s->target_bs);
bdrv_graph_wrunlock();
/*
* This call will do I/O, so the graph can change again from here on.
@ -247,6 +253,7 @@ static const BlockJobDriver stream_job_driver = {
void stream_start(const char *job_id, BlockDriverState *bs,
BlockDriverState *base, const char *backing_file_str,
bool backing_mask_protocol,
BlockDriverState *bottom,
int creation_flags, int64_t speed,
BlockdevOnError on_error,
@ -366,10 +373,10 @@ void stream_start(const char *job_id, BlockDriverState *bs,
* already have our own plans. Also don't allow resize as the image size is
* queried only at the job start and then cached.
*/
bdrv_graph_wrlock(bs);
bdrv_graph_wrlock();
if (block_job_add_bdrv(&s->common, "active node", bs, 0,
basic_flags | BLK_PERM_WRITE, errp)) {
bdrv_graph_wrunlock(bs);
bdrv_graph_wrunlock();
goto fail;
}
@ -389,15 +396,16 @@ void stream_start(const char *job_id, BlockDriverState *bs,
ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
basic_flags, errp);
if (ret < 0) {
bdrv_graph_wrunlock(bs);
bdrv_graph_wrunlock();
goto fail;
}
}
bdrv_graph_wrunlock(bs);
bdrv_graph_wrunlock();
s->base_overlay = base_overlay;
s->above_base = above_base;
s->backing_file_str = g_strdup(backing_file_str);
s->backing_mask_protocol = backing_mask_protocol;
s->cor_filter_bs = cor_filter_bs;
s->target_bs = bs;
s->bs_read_only = bs_read_only;
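
The new backing_mask_protocol field threaded through StreamBlockJob and stream_start() changes what the job records in the image header when it completes: if the option is set and the base node's driver is a protocol driver (drv->protocol_name is non-NULL), the backing format is written as "raw" instead of the protocol name, keeping the header acceptable to consumers that only understand format names. The core decision, as it appears in stream_prepare() above:

    if (s->backing_mask_protocol &&
        unfiltered_base->drv->protocol_name) {
        base_fmt = "raw";       /* mask e.g. a "file" or "nbd" driver */
    } else {
        base_fmt = unfiltered_base->drv->format_name;
    }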


@ -738,6 +738,7 @@ static int coroutine_fn GRAPH_UNLOCKED
vdi_co_do_create(BlockdevCreateOptions *create_options, size_t block_size,
Error **errp)
{
ERRP_GUARD();
BlockdevCreateOptionsVdi *vdi_opts;
int ret = 0;
uint64_t bytes = 0;


@ -272,7 +272,7 @@ static void vmdk_free_extents(BlockDriverState *bs)
BDRVVmdkState *s = bs->opaque;
VmdkExtent *e;
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
for (i = 0; i < s->num_extents; i++) {
e = &s->extents[i];
g_free(e->l1_table);
@ -283,7 +283,7 @@ static void vmdk_free_extents(BlockDriverState *bs)
bdrv_unref_child(bs, e->file);
}
}
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
g_free(s->extents);
}
@ -1147,6 +1147,7 @@ static int GRAPH_RDLOCK
vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
Error **errp)
{
ERRP_GUARD();
int ret;
int matches;
char access[11];
@ -1247,9 +1248,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
0, 0, 0, 0, 0, &extent, errp);
if (ret < 0) {
bdrv_graph_rdunlock_main_loop();
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_unref_child(bs, extent_file);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
bdrv_graph_rdlock_main_loop();
goto out;
}
@ -1266,9 +1267,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
g_free(buf);
if (ret) {
bdrv_graph_rdunlock_main_loop();
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_unref_child(bs, extent_file);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
bdrv_graph_rdlock_main_loop();
goto out;
}
@ -1277,9 +1278,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
ret = vmdk_open_se_sparse(bs, extent_file, bs->open_flags, errp);
if (ret) {
bdrv_graph_rdunlock_main_loop();
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_unref_child(bs, extent_file);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
bdrv_graph_rdlock_main_loop();
goto out;
}
@ -1287,9 +1288,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
} else {
error_setg(errp, "Unsupported extent type '%s'", type);
bdrv_graph_rdunlock_main_loop();
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_unref_child(bs, extent_file);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
bdrv_graph_rdlock_main_loop();
ret = -ENOTSUP;
goto out;


@ -33,7 +33,6 @@ void qmp_block_set_write_threshold(const char *node_name,
Error **errp)
{
BlockDriverState *bs;
AioContext *aio_context;
bs = bdrv_find_node(node_name);
if (!bs) {
@ -41,12 +40,7 @@ void qmp_block_set_write_threshold(const char *node_name,
return;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
bdrv_write_threshold_set(bs, threshold_bytes);
aio_context_release(aio_context);
}
void bdrv_write_threshold_check_write(BlockDriverState *bs, int64_t offset,

File diff suppressed because it is too large.


@ -198,9 +198,7 @@ void block_job_remove_all_bdrv(BlockJob *job)
* one to make sure that such a concurrent access does not attempt
* to process an already freed BdrvChild.
*/
aio_context_release(job->job.aio_context);
bdrv_graph_wrlock(NULL);
aio_context_acquire(job->job.aio_context);
bdrv_graph_wrlock();
while (job->nodes) {
GSList *l = job->nodes;
BdrvChild *c = l->data;
@ -212,7 +210,7 @@ void block_job_remove_all_bdrv(BlockJob *job)
g_slist_free_1(l);
}
bdrv_graph_wrunlock_ctx(job->job.aio_context);
bdrv_graph_wrunlock();
}
bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs)
@ -234,28 +232,12 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
uint64_t perm, uint64_t shared_perm, Error **errp)
{
BdrvChild *c;
AioContext *ctx = bdrv_get_aio_context(bs);
bool need_context_ops;
GLOBAL_STATE_CODE();
bdrv_ref(bs);
need_context_ops = ctx != job->job.aio_context;
if (need_context_ops) {
if (job->job.aio_context != qemu_get_aio_context()) {
aio_context_release(job->job.aio_context);
}
aio_context_acquire(ctx);
}
c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job,
errp);
if (need_context_ops) {
aio_context_release(ctx);
if (job->job.aio_context != qemu_get_aio_context()) {
aio_context_acquire(job->job.aio_context);
}
}
if (c == NULL) {
return -EPERM;
}
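
block_job_add_bdrv() previously had to release the job's AioContext, acquire the node's, attach the child, and then swap the locks back; with AioContext locking removed, the attach happens directly under the caller-held graph write lock. What survives, annotated (sketch):

    bdrv_ref(bs);
    /* Caller holds the graph write lock; no per-context juggling. */
    c = bdrv_root_attach_child(bs, name, &child_job, 0,
                               perm, shared_perm, job, errp);
    if (c == NULL) {
        return -EPERM;
    }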
@ -514,7 +496,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
int ret;
GLOBAL_STATE_CODE();
bdrv_graph_wrlock(bs);
bdrv_graph_wrlock();
if (job_id == NULL && !(flags & JOB_INTERNAL)) {
job_id = bdrv_get_device_name(bs);
@ -523,7 +505,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
job = job_create(job_id, &driver->job_driver, txn, bdrv_get_aio_context(bs),
flags, cb, opaque, errp);
if (job == NULL) {
bdrv_graph_wrunlock(bs);
bdrv_graph_wrunlock();
return NULL;
}
@ -563,11 +545,11 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
goto fail;
}
bdrv_graph_wrunlock(bs);
bdrv_graph_wrunlock();
return job;
fail:
bdrv_graph_wrunlock(bs);
bdrv_graph_wrunlock();
job_early_fail(&job->job);
return NULL;
}


@ -641,7 +641,7 @@ static abi_long do_bsd_readlink(CPUArchState *env, abi_long arg1,
}
if (strcmp(p1, "/proc/curproc/file") == 0) {
CPUState *cpu = env_cpu(env);
TaskState *ts = (TaskState *)cpu->opaque;
TaskState *ts = get_task_state(cpu);
strncpy(p2, ts->bprm->fullpath, arg3);
ret = MIN((abi_long)strlen(ts->bprm->fullpath), arg3);
} else {
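
get_task_state() replaces the open-coded (TaskState *)cpu->opaque casts that were scattered across bsd-user and linux-user. Presumably it is a trivial inline accessor along these lines (sketch; the real definition lives in the user-mode qemu.h headers):

    static inline TaskState *get_task_state(CPUState *cs)
    {
        return cs->opaque;
    }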


@ -208,7 +208,7 @@ static inline abi_long do_freebsd_fork(void *cpu_env)
*/
set_second_rval(cpu_env, child_flag);
fork_end(child_flag);
fork_end(ret);
return ret;
}
@ -252,7 +252,7 @@ static inline abi_long do_freebsd_rfork(void *cpu_env, abi_long flags)
* value: 0 for parent process, 1 for child process.
*/
set_second_rval(cpu_env, child_flag);
fork_end(child_flag);
fork_end(ret);
return ret;
@ -285,7 +285,7 @@ static inline abi_long do_freebsd_pdfork(void *cpu_env, abi_ulong target_fdp,
* value: 0 for parent process, 1 for child process.
*/
set_second_rval(cpu_env, child_flag);
fork_end(child_flag);
fork_end(ret);
return ret;
}
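
In all three fork paths above, fork_end() now receives fork()'s raw return value instead of the derived child flag, so it can distinguish parent from child itself and, in the parent, also knows the child's pid for common bookkeeping. The convention, spelled out (sketch):

    /* ret is what fork()/rfork()/pdfork() returned:
     *   ret == 0 -> running in the child
     *   ret  > 0 -> running in the parent; ret is the child's pid
     */
    fork_end(ret);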

Some files were not shown because too many files have changed in this diff.