Merge conflicts

commit 844841307d
Author: Andrea Fioraldi
Date: 2021-11-15 14:03:49 +01:00
1217 changed files with 47914 additions and 22376 deletions

.github/lockdown.yml (deleted)

@ -1,34 +0,0 @@
# Configuration for Repo Lockdown - https://github.com/dessant/repo-lockdown
# Close issues and pull requests
close: true
# Lock issues and pull requests
lock: true
issues:
comment: |
Thank you for your interest in the QEMU project.
This repository is a read-only mirror of the project's repositories hosted
at https://gitlab.com/qemu-project/qemu.git.
The project does not process issues filed on GitHub.
The project issues are tracked on GitLab:
https://gitlab.com/qemu-project/qemu/-/issues
QEMU welcomes bug report contributions. You can file new ones on:
https://gitlab.com/qemu-project/qemu/-/issues/new
pulls:
comment: |
Thank you for your interest in the QEMU project.
This repository is a read-only mirror of the project's repositories hosted
on https://gitlab.com/qemu-project/qemu.git.
The project does not process merge requests filed on GitHub.
QEMU welcomes contributions of code (either fixing bugs or adding new
functionality). However, we get a lot of patches, and so we have some
guidelines about contributing on the project website:
https://www.qemu.org/contribute/

.github/workflows/lockdown.yml (new file)

@ -0,0 +1,30 @@
# Configuration for Repo Lockdown - https://github.com/dessant/repo-lockdown
name: 'Repo Lockdown'
on:
pull_request_target:
types: opened
permissions:
pull-requests: write
jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: dessant/repo-lockdown@v2
with:
pull-comment: |
Thank you for your interest in the QEMU project.
This repository is a read-only mirror of the project's repositories hosted
on https://gitlab.com/qemu-project/qemu.git.
The project does not process merge requests filed on GitHub.
QEMU welcomes contributions of code (either fixing bugs or adding new
functionality). However, we get a lot of patches, and so we have some
guidelines about contributing on the project website:
https://www.qemu.org/contribute/
lock-pull: true
close-pull: true


@ -37,7 +37,7 @@
# Avoid recompiling by hiding ninja with NINJA=":"
- make NINJA=":" $MAKE_CHECK_ARGS
.acceptance_test_job_template:
.avocado_test_job_template:
extends: .native_test_job_template
cache:
key: "${CI_JOB_NAME}-cache"


@ -26,14 +26,14 @@ check-system-alpine:
IMAGE: alpine
MAKE_CHECK_ARGS: check
acceptance-system-alpine:
extends: .acceptance_test_job_template
avocado-system-alpine:
extends: .avocado_test_job_template
needs:
- job: build-system-alpine
artifacts: true
variables:
IMAGE: alpine
MAKE_CHECK_ARGS: check-acceptance
MAKE_CHECK_ARGS: check-avocado
build-system-ubuntu:
extends: .native_build_job_template
@ -59,14 +59,14 @@ check-system-ubuntu:
IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check
acceptance-system-ubuntu:
extends: .acceptance_test_job_template
avocado-system-ubuntu:
extends: .avocado_test_job_template
needs:
- job: build-system-ubuntu
artifacts: true
variables:
IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check-acceptance
MAKE_CHECK_ARGS: check-avocado
build-system-debian:
extends: .native_build_job_template
@ -91,14 +91,14 @@ check-system-debian:
IMAGE: debian-amd64
MAKE_CHECK_ARGS: check
acceptance-system-debian:
extends: .acceptance_test_job_template
avocado-system-debian:
extends: .avocado_test_job_template
needs:
- job: build-system-debian
artifacts: true
variables:
IMAGE: debian-amd64
MAKE_CHECK_ARGS: check-acceptance
MAKE_CHECK_ARGS: check-avocado
build-system-fedora:
extends: .native_build_job_template
@ -125,14 +125,14 @@ check-system-fedora:
IMAGE: fedora
MAKE_CHECK_ARGS: check
acceptance-system-fedora:
extends: .acceptance_test_job_template
avocado-system-fedora:
extends: .avocado_test_job_template
needs:
- job: build-system-fedora
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-acceptance
MAKE_CHECK_ARGS: check-avocado
build-system-centos:
extends: .native_build_job_template
@ -159,14 +159,14 @@ check-system-centos:
IMAGE: centos8
MAKE_CHECK_ARGS: check
acceptance-system-centos:
extends: .acceptance_test_job_template
avocado-system-centos:
extends: .avocado_test_job_template
needs:
- job: build-system-centos
artifacts: true
variables:
IMAGE: centos8
MAKE_CHECK_ARGS: check-acceptance
MAKE_CHECK_ARGS: check-avocado
build-system-opensuse:
extends: .native_build_job_template
@ -191,14 +191,14 @@ check-system-opensuse:
IMAGE: opensuse-leap
MAKE_CHECK_ARGS: check
acceptance-system-opensuse:
extends: .acceptance_test_job_template
avocado-system-opensuse:
extends: .avocado_test_job_template
needs:
- job: build-system-opensuse
artifacts: true
variables:
IMAGE: opensuse-leap
MAKE_CHECK_ARGS: check-acceptance
MAKE_CHECK_ARGS: check-avocado
# These jobs explicitly disable TCG (--disable-tcg), KVM is detected by
@ -317,7 +317,7 @@ clang-user:
# This can be accomplished by using --enable-slirp=git, which avoids the use of
# a system-wide version of the library
#
# Split in three sets of build/check/acceptance to limit the execution time of each
# Split in three sets of build/check/avocado to limit the execution time of each
# job
build-cfi-aarch64:
extends: .native_build_job_template
@ -352,14 +352,14 @@ check-cfi-aarch64:
IMAGE: fedora
MAKE_CHECK_ARGS: check
acceptance-cfi-aarch64:
extends: .acceptance_test_job_template
avocado-cfi-aarch64:
extends: .avocado_test_job_template
needs:
- job: build-cfi-aarch64
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-acceptance
MAKE_CHECK_ARGS: check-avocado
build-cfi-ppc64-s390x:
extends: .native_build_job_template
@ -394,14 +394,14 @@ check-cfi-ppc64-s390x:
IMAGE: fedora
MAKE_CHECK_ARGS: check
acceptance-cfi-ppc64-s390x:
extends: .acceptance_test_job_template
avocado-cfi-ppc64-s390x:
extends: .avocado_test_job_template
needs:
- job: build-cfi-ppc64-s390x
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-acceptance
MAKE_CHECK_ARGS: check-avocado
build-cfi-x86_64:
extends: .native_build_job_template
@ -430,14 +430,14 @@ check-cfi-x86_64:
IMAGE: fedora
MAKE_CHECK_ARGS: check
acceptance-cfi-x86_64:
extends: .acceptance_test_job_template
avocado-cfi-x86_64:
extends: .avocado_test_job_template
needs:
- job: build-cfi-x86_64
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-acceptance
MAKE_CHECK_ARGS: check-avocado
tsan-build:
extends: .native_build_job_template
@ -575,7 +575,6 @@ build-without-default-features:
CONFIGURE_ARGS:
--without-default-features
--disable-capstone
--disable-fdt
--disable-pie
--disable-qom-cast-debug
--disable-slirp


@ -35,7 +35,7 @@
-e "s|[@]PIP3@|$PIP3|g"
-e "s|[@]PYPI_PKGS@|$PYPI_PKGS|g"
-e "s|[@]CONFIGURE_ARGS@|$CONFIGURE_ARGS|g"
-e "s|[@]TEST_TARGETSS@|$TEST_TARGETSS|g"
-e "s|[@]TEST_TARGETS@|$TEST_TARGETS|g"
<.gitlab-ci.d/cirrus/build.yml >.gitlab-ci.d/cirrus/$NAME.yml
- cat .gitlab-ci.d/cirrus/$NAME.yml
- cirrus-run -v --show-build-log always .gitlab-ci.d/cirrus/$NAME.yml


@ -13,6 +13,7 @@ env:
PYTHON: "@PYTHON@"
MAKE: "@MAKE@"
CONFIGURE_ARGS: "@CONFIGURE_ARGS@"
TEST_TARGETS: "@TEST_TARGETS@"
build_task:
install_script:


@ -134,7 +134,8 @@ ppc64el-debian-cross-container:
riscv64-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
# as we are currently based on 'sid/unstable' we may break so...
allow_failure: true
variables:
NAME: debian-riscv64-cross


@ -124,6 +124,25 @@ cross-ppc64el-user:
variables:
IMAGE: debian-ppc64el-cross
# The riscv64 cross-builds currently use a 'sid' container to get
# compilers and libraries. Until something more stable is found we
# allow_failure so as not to block CI.
cross-riscv64-system:
extends: .cross_system_build_job
allow_failure: true
needs:
job: riscv64-debian-cross-container
variables:
IMAGE: debian-riscv64-cross
cross-riscv64-user:
extends: .cross_user_build_job
allow_failure: true
needs:
job: riscv64-debian-cross-container
variables:
IMAGE: debian-riscv64-cross
cross-s390x-system:
extends: .cross_system_build_job
needs:


@ -50,7 +50,11 @@ build-edk2:
GIT_DEPTH: 3
script: # Clone the required submodules and build EDK2
- git submodule update --init roms/edk2
- git -C roms/edk2 submodule update --init
- git -C roms/edk2 submodule update --init --
ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
BaseTools/Source/C/BrotliCompress/brotli
CryptoPkg/Library/OpensslLib/openssl
MdeModulePkg/Library/BrotliCustomDecompressLib/brotli
- export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1))
- echo "=== Using ${JOBS} simultaneous jobs ==="
- make -j${JOBS} -C roms efi 2>&1 1>edk2-stdout.log | tee -a edk2-stderr.log >&2


@ -8,7 +8,7 @@ check-patch:
variables:
GIT_DEPTH: 1000
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
- if: '$CI_PROJECT_NAMESPACE == "qemu-project"'
when: never
- when: on_success
allow_failure: true


@ -69,6 +69,7 @@ Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com>
# git author config, or had utf8/latin1 encoding issues.
Aaron Lindsay <aaron@os.amperecomputing.com>
Alexey Gerasimenko <x1917x@gmail.com>
Alex Chen <alex.chen@huawei.com>
Alex Ivanov <void@aleksoft.net>
Andreas Färber <afaerber@suse.de>
Bandan Das <bsd@redhat.com>
@ -99,9 +100,11 @@ Gautham R. Shenoy <ego@in.ibm.com>
Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Gonglei (Arei) <arei.gonglei@huawei.com>
Guang Wang <wang.guang55@zte.com.cn>
Haibin Zhang <haibinzhang@tencent.com>
Hailiang Zhang <zhang.zhanghailiang@huawei.com>
Hanna Reitz <hreitz@redhat.com> <mreitz@redhat.com>
Hervé Poussineau <hpoussin@reactos.org>
Hyman Huang <huangy81@chinatelecom.cn>
Jakub Jermář <jakub@jermar.eu>
Jakub Jermář <jakub.jermar@kernkonzept.com>
Jean-Christophe Dubois <jcd@tribudubois.net>
@ -135,6 +138,7 @@ Nicholas Thomas <nick@bytemark.co.uk>
Nikunj A Dadhania <nikunj@linux.vnet.ibm.com>
Orit Wasserman <owasserm@redhat.com>
Paolo Bonzini <pbonzini@redhat.com>
Pan Nengyuan <pannengyuan@huawei.com>
Pavel Dovgaluk <dovgaluk@ispras.ru>
Pavel Dovgaluk <pavel.dovgaluk@gmail.com>
Pavel Dovgaluk <Pavel.Dovgaluk@ispras.ru>


@ -305,26 +305,3 @@ jobs:
- CONFIG="--disable-containers --disable-tcg --enable-kvm
--disable-tools --host-cc=clang --cxx=clang++"
- UNRELIABLE=true
# Release builds
# The make-release script expects a QEMU version, so our tag must start with a 'v'.
# This is the case when release candidate tags are created.
- name: "Release tarball"
if: tag IS present AND tag =~ /^v\d+\.\d+(\.\d+)?(-\S*)?$/
env:
# We want to build from the release tarball
- BUILD_DIR="release/build/dir" SRC_DIR="../../.."
- BASE_CONFIG="--prefix=$PWD/dist"
- CONFIG="--target-list=x86_64-softmmu,aarch64-softmmu,armeb-linux-user,ppc-linux-user"
- TEST_CMD="make install -j${JOBS}"
- QEMU_VERSION="${TRAVIS_TAG:1}"
- CACHE_NAME="${TRAVIS_BRANCH}-linux-gcc-default"
script:
- make -C ${SRC_DIR} qemu-${QEMU_VERSION}.tar.bz2
- ls -l ${SRC_DIR}/qemu-${QEMU_VERSION}.tar.bz2
- tar -xf ${SRC_DIR}/qemu-${QEMU_VERSION}.tar.bz2 && cd qemu-${QEMU_VERSION}
- mkdir -p release-build && cd release-build
- ../configure ${BASE_CONFIG} ${CONFIG} || { cat config.log meson-logs/meson-log.txt && exit 1; }
- make install
allow_failures:
- env: UNRELIABLE=true


@ -41,3 +41,7 @@ config PVRDMA
config MULTIPROCESS_ALLOWED
bool
imply MULTIPROCESS
config FUZZ
bool
select SPARSE_MEM


@ -109,6 +109,12 @@ K: ^Subject:.*(?i)s390x?
T: git https://gitlab.com/cohuck/qemu.git s390-next
L: qemu-s390x@nongnu.org
MIPS general architecture support
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
R: Jiaxun Yang <jiaxun.yang@flygoat.com>
S: Odd Fixes
K: ^Subject:.*(?i)mips
Guest CPU cores (TCG)
---------------------
Overall TCG CPUs
@ -171,7 +177,7 @@ L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/smmu*
F: include/hw/arm/smmu*
F: tests/acceptance/smmu.py
F: tests/avocado/smmu.py
AVR TCG CPUs
M: Michael Rolnik <mrolnik@gmail.com>
@ -179,7 +185,7 @@ S: Maintained
F: docs/system/target-avr.rst
F: gdb-xml/avr-cpu.xml
F: target/avr/
F: tests/acceptance/machine_avr6.py
F: tests/avocado/machine_avr6.py
CRIS TCG CPUs
M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
@ -205,10 +211,7 @@ HPPA (PA-RISC) TCG CPUs
M: Richard Henderson <richard.henderson@linaro.org>
S: Maintained
F: target/hppa/
F: hw/hppa/
F: disas/hppa.c
F: hw/net/*i82596*
F: include/hw/net/lasi_82596.h
M68K TCG CPUs
M: Laurent Vivier <laurent@vivier.eu>
@ -222,6 +225,8 @@ S: Maintained
F: target/microblaze/
F: hw/microblaze/
F: disas/microblaze.c
F: tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh
F: tests/tcg/nios2/Makefile.target
MIPS TCG CPUs
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
@ -230,19 +235,9 @@ R: Jiaxun Yang <jiaxun.yang@flygoat.com>
R: Aleksandar Rikalo <aleksandar.rikalo@syrmia.com>
S: Odd Fixes
F: target/mips/
F: configs/devices/mips*/*
F: disas/mips.c
F: docs/system/cpu-models-mips.rst.inc
F: hw/intc/mips_gic.c
F: hw/mips/
F: hw/misc/mips_*
F: hw/timer/mips_gictimer.c
F: include/hw/intc/mips_gic.h
F: include/hw/mips/
F: include/hw/misc/mips_*
F: include/hw/timer/mips_gictimer.h
F: tests/tcg/mips/
K: ^Subject:.*(?i)mips
MIPS TCG CPUs (nanoMIPS ISA)
S: Orphan
@ -257,6 +252,7 @@ F: target/nios2/
F: hw/nios2/
F: disas/nios2.c
F: configs/devices/nios2-softmmu/default.mak
F: tests/docker/dockerfiles/debian-nios2-cross.d/build-toolchain.sh
OpenRISC TCG CPUs
M: Stafford Horne <shorne@gmail.com>
@ -266,13 +262,16 @@ F: hw/openrisc/
F: tests/tcg/openrisc/
PowerPC TCG CPUs
M: David Gibson <david@gibson.dropbear.id.au>
M: Greg Kurz <groug@kaod.org>
M: Cédric Le Goater <clg@kaod.org>
M: Daniel Henrique Barboza <danielhb413@gmail.com>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org
S: Maintained
F: target/ppc/
F: hw/ppc/
F: include/hw/ppc/
F: hw/ppc/ppc.c
F: hw/ppc/ppc_booke.c
F: include/hw/ppc/ppc.h
F: disas/ppc.c
RISC-V TCG CPUs
@ -385,14 +384,15 @@ F: target/mips/kvm*
F: target/mips/sysemu/
PPC KVM CPUs
M: David Gibson <david@gibson.dropbear.id.au>
M: Greg Kurz <groug@kaod.org>
M: Cédric Le Goater <clg@kaod.org>
M: Daniel Henrique Barboza <danielhb413@gmail.com>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
S: Maintained
F: target/ppc/kvm.c
S390 KVM CPUs
M: Halil Pasic <pasic@linux.ibm.com>
M: Cornelia Huck <cohuck@redhat.com>
M: Christian Borntraeger <borntraeger@de.ibm.com>
S: Supported
F: target/s390x/kvm/
@ -407,7 +407,6 @@ F: hw/intc/s390_flic.c
F: hw/intc/s390_flic_kvm.c
F: include/hw/s390x/s390_flic.h
F: gdb-xml/s390*.xml
T: git https://gitlab.com/cohuck/qemu.git s390-next
T: git https://github.com/borntraeger/qemu.git s390-next
L: qemu-s390x@nongnu.org
@ -416,7 +415,10 @@ M: Paolo Bonzini <pbonzini@redhat.com>
M: Marcelo Tosatti <mtosatti@redhat.com>
L: kvm@vger.kernel.org
S: Supported
F: docs/amd-memory-encryption.txt
F: docs/system/i386/sgx.rst
F: target/i386/kvm/
F: target/i386/sev*
F: scripts/kvm/vmxcap
Guest CPU Cores (other accelerators)
@ -659,7 +661,7 @@ S: Odd Fixes
F: include/hw/arm/digic.h
F: hw/*/digic*
F: include/hw/*/digic*
F: tests/acceptance/machine_arm_canona1100.py
F: tests/avocado/machine_arm_canona1100.py
F: docs/system/arm/digic.rst
Goldfish RTC
@ -710,7 +712,7 @@ S: Maintained
F: hw/arm/integratorcp.c
F: hw/misc/arm_integrator_debug.c
F: include/hw/misc/arm_integrator_debug.h
F: tests/acceptance/machine_arm_integratorcp.py
F: tests/avocado/machine_arm_integratorcp.py
F: docs/system/arm/integratorcp.rst
MCIMX6UL EVK / i.MX6ul
@ -807,7 +809,7 @@ F: include/hw/display/blizzard.h
F: include/hw/input/lm832x.h
F: include/hw/input/tsc2xxx.h
F: include/hw/misc/cbus.h
F: tests/acceptance/machine_arm_n8x0.py
F: tests/avocado/machine_arm_n8x0.py
F: docs/system/arm/nseries.rst
Palm
@ -1097,6 +1099,8 @@ R: Helge Deller <deller@gmx.de>
S: Odd Fixes
F: configs/devices/hppa-softmmu/default.mak
F: hw/hppa/
F: hw/net/*i82596*
F: include/hw/net/lasi_82596.h
F: pc-bios/hppa-firmware.img
M68K Machines
@ -1159,7 +1163,7 @@ M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
S: Maintained
F: hw/microblaze/petalogix_s3adsp1800_mmu.c
F: include/hw/char/xilinx_uartlite.h
F: tests/acceptance/machine_microblaze.py
F: tests/avocado/machine_microblaze.py
petalogix_ml605
M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
@ -1168,6 +1172,13 @@ F: hw/microblaze/petalogix_ml605_mmu.c
MIPS Machines
-------------
Overall MIPS Machines
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
S: Odd Fixes
F: configs/devices/mips*/*
F: hw/mips/
F: include/hw/mips/
Jazz
M: Hervé Poussineau <hpoussin@reactos.org>
R: Aleksandar Rikalo <aleksandar.rikalo@syrmia.com>
@ -1185,8 +1196,8 @@ F: hw/acpi/piix4.c
F: hw/mips/malta.c
F: hw/mips/gt64xxx_pci.c
F: include/hw/southbridge/piix.h
F: tests/acceptance/linux_ssh_mips_malta.py
F: tests/acceptance/machine_mips_malta.py
F: tests/avocado/linux_ssh_mips_malta.py
F: tests/avocado/machine_mips_malta.py
Mipssim
R: Aleksandar Rikalo <aleksandar.rikalo@syrmia.com>
@ -1204,7 +1215,7 @@ F: hw/isa/vt82c686.c
F: hw/pci-host/bonito.c
F: hw/usb/vt82c686-uhci-pci.c
F: include/hw/isa/vt82c686.h
F: tests/acceptance/machine_mips_fuloong2e.py
F: tests/avocado/machine_mips_fuloong2e.py
Loongson-3 virtual platforms
M: Huacai Chen <chenhuacai@kernel.org>
@ -1214,7 +1225,7 @@ F: hw/intc/loongson_liointc.c
F: hw/mips/loongson3_bootp.c
F: hw/mips/loongson3_bootp.h
F: hw/mips/loongson3_virt.c
F: tests/acceptance/machine_mips_loongson3v.py
F: tests/avocado/machine_mips_loongson3v.py
Boston
M: Paul Burton <paulburton@kernel.org>
@ -1235,24 +1246,19 @@ F: hw/openrisc/openrisc_sim.c
PowerPC Machines
----------------
405
M: David Gibson <david@gibson.dropbear.id.au>
M: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org
S: Odd Fixes
S: Orphan
F: hw/ppc/ppc405_boards.c
Bamboo
M: David Gibson <david@gibson.dropbear.id.au>
M: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org
S: Odd Fixes
S: Orphan
F: hw/ppc/ppc440_bamboo.c
F: tests/avocado/ppc_bamboo.py
e500
M: David Gibson <david@gibson.dropbear.id.au>
M: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org
S: Odd Fixes
S: Orphan
F: hw/ppc/e500*
F: hw/gpio/mpc8xxx.c
F: hw/i2c/mpc_i2c.c
@ -1261,20 +1267,18 @@ F: hw/pci-host/ppce500.c
F: include/hw/ppc/ppc_e500.h
F: include/hw/pci-host/ppce500.h
F: pc-bios/u-boot.e500
F: hw/intc/openpic_kvm.h
F: include/hw/ppc/openpic_kvm.h
mpc8544ds
M: David Gibson <david@gibson.dropbear.id.au>
M: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org
S: Odd Fixes
S: Orphan
F: hw/ppc/mpc8544ds.c
F: hw/ppc/mpc8544_guts.c
F: tests/acceptance/ppc_mpc8544ds.py
F: tests/avocado/ppc_mpc8544ds.py
New World (mac99)
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/ppc/mac_newworld.c
@ -1293,8 +1297,6 @@ F: pc-bios/qemu_vga.ndrv
Old World (g3beige)
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/ppc/mac_oldworld.c
@ -1308,8 +1310,6 @@ F: pc-bios/qemu_vga.ndrv
PReP
M: Hervé Poussineau <hpoussin@reactos.org>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org
S: Maintained
F: hw/ppc/prep.c
@ -1322,13 +1322,15 @@ F: hw/dma/i82374.c
F: hw/rtc/m48t59-isa.c
F: include/hw/isa/pc87312.h
F: include/hw/rtc/m48t59.h
F: tests/acceptance/ppc_prep_40p.py
F: tests/avocado/ppc_prep_40p.py
sPAPR
M: David Gibson <david@gibson.dropbear.id.au>
M: Greg Kurz <groug@kaod.org>
M: Cédric Le Goater <clg@kaod.org>
M: Daniel Henrique Barboza <danielhb413@gmail.com>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org
S: Supported
S: Maintained
F: hw/*/spapr*
F: include/hw/*/spapr*
F: hw/*/xics*
@ -1340,12 +1342,10 @@ F: tests/qtest/spapr*
F: tests/qtest/libqos/*spapr*
F: tests/qtest/rtas*
F: tests/qtest/libqos/rtas*
F: tests/acceptance/ppc_pseries.py
F: tests/avocado/ppc_pseries.py
PowerNV (Non-Virtualized)
M: Cédric Le Goater <clg@kaod.org>
M: David Gibson <david@gibson.dropbear.id.au>
M: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org
S: Maintained
F: hw/ppc/pnv*
@ -1362,12 +1362,10 @@ M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/ppc/virtex_ml507.c
F: tests/acceptance/ppc_virtex_ml507.py
F: tests/avocado/ppc_virtex_ml507.py
sam460ex
M: BALATON Zoltan <balaton@eik.bme.hu>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org
S: Maintained
F: hw/ppc/sam460ex.c
@ -1381,7 +1379,6 @@ F: roms/u-boot-sam460ex
pegasos2
M: BALATON Zoltan <balaton@eik.bme.hu>
R: David Gibson <david@gibson.dropbear.id.au>
L: qemu-ppc@nongnu.org
S: Maintained
F: hw/ppc/pegasos2.c
@ -1391,6 +1388,8 @@ F: include/hw/pci-host/mv64361.h
Virtual Open Firmware (VOF)
M: Alexey Kardashevskiy <aik@ozlabs.ru>
R: Cédric Le Goater <clg@kaod.org>
R: Daniel Henrique Barboza <danielhb413@gmail.com>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org
@ -1452,7 +1451,7 @@ R: Yoshinori Sato <ysato@users.sourceforge.jp>
S: Orphan
F: docs/system/target-rx.rst
F: hw/rx/rx-gdbsim.c
F: tests/acceptance/machine_rx_gdbsim.py
F: tests/avocado/machine_rx_gdbsim.py
SH4 Machines
------------
@ -1506,7 +1505,7 @@ F: include/hw/pci-host/sabre.h
F: hw/pci-bridge/simba.c
F: include/hw/pci-bridge/simba.h
F: pc-bios/openbios-sparc64
F: tests/acceptance/machine_sparc64_sun4u.py
F: tests/avocado/machine_sparc64_sun4u.py
Sun4v
M: Artyom Tarasenko <atar4qemu@gmail.com>
@ -1522,12 +1521,11 @@ S: Maintained
F: hw/sparc/leon3.c
F: hw/*/grlib*
F: include/hw/*/grlib*
F: tests/acceptance/machine_sparc_leon3.py
F: tests/avocado/machine_sparc_leon3.py
S390 Machines
-------------
S390 Virtio-ccw
M: Cornelia Huck <cohuck@redhat.com>
M: Halil Pasic <pasic@linux.ibm.com>
M: Christian Borntraeger <borntraeger@de.ibm.com>
S: Supported
@ -1538,8 +1536,7 @@ F: include/hw/s390x/
F: hw/watchdog/wdt_diag288.c
F: include/hw/watchdog/wdt_diag288.h
F: configs/devices/s390x-softmmu/default.mak
F: tests/acceptance/machine_s390_ccw_virtio.py
T: git https://gitlab.com/cohuck/qemu.git s390-next
F: tests/avocado/machine_s390_ccw_virtio.py
T: git https://github.com/borntraeger/qemu.git s390-next
L: qemu-s390x@nongnu.org
@ -1625,7 +1622,7 @@ microvm
M: Sergio Lopez <slp@redhat.com>
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: docs/microvm.rst
F: docs/system/i386/microvm.rst
F: hw/i386/microvm.c
F: include/hw/i386/microvm.h
F: pc-bios/bios-microvm.bin
@ -1633,11 +1630,13 @@ F: pc-bios/bios-microvm.bin
Machine core
M: Eduardo Habkost <ehabkost@redhat.com>
M: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
R: Philippe Mathieu-Daudé <philmd@redhat.com>
S: Supported
F: cpu.c
F: hw/core/cpu.c
F: hw/core/machine-qmp-cmds.c
F: hw/core/machine.c
F: hw/core/machine-smp.c
F: hw/core/null-machine.c
F: hw/core/numa.c
F: hw/cpu/cluster.c
@ -1647,6 +1646,7 @@ F: include/hw/boards.h
F: include/hw/core/cpu.h
F: include/hw/cpu/cluster.h
F: include/sysemu/numa.h
F: tests/unit/test-smp-parse.c
T: git https://gitlab.com/ehabkost/qemu.git machine-next
Xtensa Machines
@ -1785,9 +1785,8 @@ F: include/hw/acpi/ghes.h
F: docs/specs/acpi_hest_ghes.rst
ppc4xx
M: David Gibson <david@gibson.dropbear.id.au>
L: qemu-ppc@nongnu.org
S: Odd Fixes
S: Orphan
F: hw/ppc/ppc4*.c
F: hw/i2c/ppc4xx_i2c.c
F: include/hw/ppc/ppc4xx.h
@ -1879,7 +1878,6 @@ F: docs/igd-assign.txt
F: docs/devel/vfio-migration.rst
vfio-ccw
M: Cornelia Huck <cohuck@redhat.com>
M: Eric Farman <farman@linux.ibm.com>
M: Matthew Rosato <mjrosato@linux.ibm.com>
S: Supported
@ -1887,7 +1885,6 @@ F: hw/vfio/ccw.c
F: hw/s390x/s390-ccw.c
F: include/hw/s390x/s390-ccw.h
F: include/hw/s390x/vfio-ccw.h
T: git https://gitlab.com/cohuck/qemu.git s390-next
L: qemu-s390x@nongnu.org
vfio-ap
@ -2123,7 +2120,7 @@ M: Alex Bennée <alex.bennee@linaro.org>
S: Maintained
F: hw/core/guest-loader.c
F: docs/system/guest-loader.rst
F: tests/acceptance/boot_xen.py
F: tests/avocado/boot_xen.py
Intel Hexadecimal Object File Loader
M: Su Hang <suhang16@mails.ucas.ac.cn>
@ -2242,8 +2239,6 @@ T: git https://github.com/philmd/qemu.git fw_cfg-next
XIVE
M: Cédric Le Goater <clg@kaod.org>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org
S: Supported
F: hw/*/*xive*
@ -2279,6 +2274,26 @@ F: net/can/*
F: hw/net/can/*
F: include/net/can_*.h
OpenPIC interrupt controller
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
S: Odd Fixes
F: hw/intc/openpic.c
F: include/hw/ppc/openpic.h
MIPS CPS
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
S: Odd Fixes
F: hw/misc/mips_*
F: include/hw/misc/mips_*
MIPS GIC
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
S: Odd Fixes
F: hw/intc/mips_gic.c
F: hw/timer/mips_gictimer.c
F: include/hw/intc/mips_gic.h
F: include/hw/timer/mips_gictimer.h
Subsystems
----------
Overall Audio backends
@ -2297,11 +2312,13 @@ F: qapi/audio.json
ALSA Audio backend
M: Gerd Hoffmann <kraxel@redhat.com>
R: Christian Schoenebeck <qemu_oss@crudebyte.com>
S: Odd Fixes
F: audio/alsaaudio.c
Core Audio framework backend
M: Gerd Hoffmann <kraxel@redhat.com>
R: Christian Schoenebeck <qemu_oss@crudebyte.com>
S: Odd Fixes
F: audio/coreaudio.c
@ -2312,6 +2329,7 @@ F: audio/dsound*
JACK Audio Connection Kit backend
M: Gerd Hoffmann <kraxel@redhat.com>
R: Christian Schoenebeck <qemu_oss@crudebyte.com>
S: Odd Fixes
F: audio/jackaudio.c
@ -2327,6 +2345,7 @@ F: audio/paaudio.c
SDL Audio backend
M: Gerd Hoffmann <kraxel@redhat.com>
R: Thomas Huth <huth@tuxfamily.org>
S: Odd Fixes
F: audio/sdlaudio.c
@ -2515,6 +2534,7 @@ Memory API
M: Paolo Bonzini <pbonzini@redhat.com>
M: Peter Xu <peterx@redhat.com>
M: David Hildenbrand <david@redhat.com>
R: Philippe Mathieu-Daudé <philmd@redhat.com>
S: Supported
F: include/exec/ioport.h
F: include/exec/memop.h
@ -2974,9 +2994,9 @@ F: net/filter-replay.c
F: include/sysemu/replay.h
F: docs/replay.txt
F: stubs/replay.c
F: tests/acceptance/replay_kernel.py
F: tests/acceptance/replay_linux.py
F: tests/acceptance/reverse_debugging.py
F: tests/avocado/replay_kernel.py
F: tests/avocado/replay_linux.py
F: tests/avocado/reverse_debugging.py
F: qapi/replay.json
IOVA Tree
@ -3093,7 +3113,7 @@ S: Maintained
F: docs/devel/tcg-plugins.rst
F: plugins/
F: tests/plugin/
F: tests/acceptance/tcg_plugins.py
F: tests/avocado/tcg_plugins.py
F: contrib/plugins/
AArch64 TCG target
@ -3482,14 +3502,14 @@ S: Maintained
F: tests/tcg/Makefile
F: tests/tcg/Makefile.include
Acceptance (Integration) Testing with the Avocado framework
Integration Testing with the Avocado framework
W: https://trello.com/b/6Qi1pxVn/avocado-qemu
R: Cleber Rosa <crosa@redhat.com>
R: Philippe Mathieu-Daudé <philmd@redhat.com>
R: Wainer dos Santos Moschetta <wainersm@redhat.com>
R: Willian Rampazzo <willianr@redhat.com>
S: Odd Fixes
F: tests/acceptance/
F: tests/avocado/
Documentation
-------------


@ -87,7 +87,7 @@ x := $(shell rm -rf meson-private meson-info meson-logs)
endif
# 1. ensure config-host.mak is up-to-date
config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/pc-bios $(SRC_PATH)/VERSION
config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh $(SRC_PATH)/pc-bios $(SRC_PATH)/VERSION
@echo config-host.mak is out-of-date, running configure
@if test -f meson-private/coredata.dat; then \
./config.status --skip-meson; \
@ -124,6 +124,12 @@ ifneq ($(MESON),)
Makefile.mtest: build.ninja scripts/mtest2make.py
$(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@
-include Makefile.mtest
.PHONY: update-buildoptions
all update-buildoptions: $(SRC_PATH)/scripts/meson-buildoptions.sh
$(SRC_PATH)/scripts/meson-buildoptions.sh: $(SRC_PATH)/meson_options.txt
$(MESON) introspect --buildoptions $(SRC_PATH)/meson.build | $(PYTHON) \
scripts/meson-buildoptions.py > $@.tmp && mv $@.tmp $@
endif
# 4. Rules to bridge to other makefiles
@ -229,7 +235,8 @@ distclean: clean
rm -f linux-headers/asm
rm -Rf .sdk
find-src-path = find "$(SRC_PATH)/" -path "$(SRC_PATH)/meson" -prune -o \( -name "*.[chsS]" -o -name "*.[ch].inc" \)
find-src-path = find "$(SRC_PATH)" -path "$(SRC_PATH)/meson" -prune -o \
-type l -prune -o \( -name "*.[chsS]" -o -name "*.[ch].inc" \)
.PHONY: ctags
ctags:
@ -250,7 +257,7 @@ gtags:
"GTAGS", "Remove old $@ files")
$(call quiet-command, \
(cd $(SRC_PATH) && \
$(find-src-path) | gtags -f -), \
$(find-src-path) -print | gtags -f -), \
"GTAGS", "Re-index $(SRC_PATH)")
.PHONY: TAGS


@ -59,9 +59,9 @@ of other UNIX targets. The simple steps to build QEMU are:
Additional information can also be found online via the QEMU website:
* `<https://qemu.org/Hosts/Linux>`_
* `<https://qemu.org/Hosts/Mac>`_
* `<https://qemu.org/Hosts/W32>`_
* `<https://wiki.qemu.org/Hosts/Linux>`_
* `<https://wiki.qemu.org/Hosts/Mac>`_
* `<https://wiki.qemu.org/Hosts/W32>`_
Submitting patches
@ -84,8 +84,8 @@ the Developers Guide.
Additional information on submitting patches can be found online via
the QEMU website
* `<https://qemu.org/Contribute/SubmitAPatch>`_
* `<https://qemu.org/Contribute/TrivialPatches>`_
* `<https://wiki.qemu.org/Contribute/SubmitAPatch>`_
* `<https://wiki.qemu.org/Contribute/TrivialPatches>`_
The QEMU website is also maintained under source control.
@ -144,7 +144,7 @@ reported via GitLab.
For additional information on bug reporting consult:
* `<https://qemu.org/Contribute/ReportABug>`_
* `<https://wiki.qemu.org/Contribute/ReportABug>`_
ChangeLog
@ -168,4 +168,4 @@ main methods being email and IRC
Information on additional methods of contacting the community can be
found online via the QEMU website:
* `<https://qemu.org/Contribute/StartHere>`_
* `<https://wiki.qemu.org/Contribute/StartHere>`_


@ -1 +1 @@
6.1.50
6.1.90


@ -122,6 +122,7 @@ static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
MemoryRegion *area = section->mr;
bool writeable = !area->readonly && !area->rom_device;
hv_memory_flags_t flags;
uint64_t page_size = qemu_real_host_page_size;
if (!memory_region_is_ram(area)) {
if (writeable) {
@ -135,6 +136,12 @@ static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
}
}
if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) ||
!QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) {
/* Not page aligned, so we can not map as RAM */
add = false;
}
mem = hvf_find_overlap_slot(
section->offset_within_address_space,
int128_get64(section->size));
@ -295,6 +302,7 @@ static void hvf_region_del(MemoryListener *listener,
}
static MemoryListener hvf_memory_listener = {
.name = "hvf",
.priority = 10,
.region_add = hvf_region_add,
.region_del = hvf_region_del,
@ -320,7 +328,7 @@ static int hvf_accel_init(MachineState *ms)
s = g_new0(HVFState, 1);
s->num_slots = 32;
s->num_slots = ARRAY_SIZE(s->slots);
for (x = 0; x < s->num_slots; ++x) {
s->slots[x].size = 0;
s->slots[x].slot_id = x;
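
A minimal sketch of the alignment guard the hvf hunk above introduces, assuming only that the page size is a power of two (the helper names below are illustrative, not QEMU API): a memory section is mapped as RAM only when both its start and its size are host-page aligned.

#include <stdbool.h>
#include <stdint.h>

/* Power-of-two alignment test, as QEMU_IS_ALIGNED performs in the hunk. */
static bool is_aligned(uint64_t value, uint64_t page_size)
{
    return (value & (page_size - 1)) == 0;
}

/* Mirror of the new guard: refuse a RAM mapping for unaligned sections. */
static bool can_map_as_ram(uint64_t start, uint64_t size, uint64_t page_size)
{
    return is_aligned(start, page_size) && is_aligned(size, page_size);
}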


@ -469,6 +469,7 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp)
cpu->kvm_fd = ret;
cpu->kvm_state = s;
cpu->vcpu_dirty = true;
cpu->dirty_pages = 0;
mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
if (mmap_size < 0) {
@ -743,6 +744,7 @@ static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
count++;
}
cpu->kvm_fetch_index = fetch;
cpu->dirty_pages += count;
return count;
}
@ -1129,6 +1131,7 @@ static void kvm_coalesce_pio_del(MemoryListener *listener,
}
static MemoryListener kvm_coalesced_pio_listener = {
.name = "kvm-coalesced-pio",
.coalesced_io_add = kvm_coalesce_pio_add,
.coalesced_io_del = kvm_coalesce_pio_del,
};
@ -1633,7 +1636,7 @@ static void kvm_io_ioeventfd_del(MemoryListener *listener,
}
void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
AddressSpace *as, int as_id)
AddressSpace *as, int as_id, const char *name)
{
int i;
@ -1649,6 +1652,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
kml->listener.log_start = kvm_log_start;
kml->listener.log_stop = kvm_log_stop;
kml->listener.priority = 10;
kml->listener.name = name;
if (s->kvm_dirty_ring_size) {
kml->listener.log_sync_global = kvm_log_sync_global;
@ -1669,6 +1673,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
}
static MemoryListener kvm_io_listener = {
.name = "kvm-io",
.eventfd_add = kvm_io_ioeventfd_add,
.eventfd_del = kvm_io_ioeventfd_del,
.priority = 10,
@ -2293,6 +2298,11 @@ bool kvm_vcpu_id_is_valid(int vcpu_id)
return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
}
bool kvm_dirty_ring_enabled(void)
{
return kvm_state->kvm_dirty_ring_size ? true : false;
}
static int kvm_init(MachineState *ms)
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
@ -2579,7 +2589,7 @@ static int kvm_init(MachineState *ms)
s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
kvm_memory_listener_register(s, &s->memory_listener,
&address_space_memory, 0);
&address_space_memory, 0, "kvm-memory");
if (kvm_eventfds_allowed) {
memory_listener_register(&kvm_io_listener,
&address_space_io);
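
A hedged sketch of the per-vCPU dirty-page accounting the kvm-all.c hunks add (the struct and names below are illustrative stand-ins, not QEMU's): dirty_pages is zeroed when the vCPU is created and grows by the number of ring entries each reap pass consumes.

#include <stdint.h>

struct vcpu {
    uint32_t kvm_fetch_index;   /* consumer cursor into the dirty ring */
    uint64_t dirty_pages;       /* lifetime count, zeroed at vCPU init */
};

/* Advance the cursor to the producer index, counting consumed entries,
 * in the spirit of kvm_dirty_ring_reap_one() above. */
static uint32_t reap_one(struct vcpu *cpu, uint32_t producer, uint32_t ring_size)
{
    uint32_t count = 0;

    while (cpu->kvm_fetch_index != producer) {
        cpu->kvm_fetch_index = (cpu->kvm_fetch_index + 1) % ring_size;
        count++;
    }
    cpu->dirty_pages += count;
    return count;
}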


@ -3,6 +3,5 @@ kvm_ss.add(files(
'kvm-all.c',
'kvm-accel-ops.c',
))
kvm_ss.add(when: 'CONFIG_SEV', if_false: files('sev-stub.c'))
specific_ss.add_all(when: 'CONFIG_KVM', if_true: kvm_ss)


@ -147,4 +147,9 @@ bool kvm_arm_supports_user_irq(void)
{
return false;
}
bool kvm_dirty_ring_enabled(void)
{
return false;
}
#endif


@ -13,56 +13,43 @@
* See the COPYING file in the top-level directory.
*/
static uint16_t atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi)
static void atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr,
MemOpIdx oi)
{
CPUState *cpu = env_cpu(env);
uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
trace_guest_mem_before_exec(cpu, addr, info);
trace_guest_mem_before_exec(cpu, addr, info | TRACE_MEM_ST);
return info;
trace_guest_rmw_before_exec(cpu, addr, oi);
}
static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
uint16_t info)
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info | TRACE_MEM_ST);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
}
#if HAVE_ATOMIC128
static uint16_t atomic_trace_ld_pre(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi)
static void atomic_trace_ld_pre(CPUArchState *env, target_ulong addr,
MemOpIdx oi)
{
uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
trace_guest_mem_before_exec(env_cpu(env), addr, info);
return info;
trace_guest_ld_before_exec(env_cpu(env), addr, oi);
}
static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
uint16_t info)
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
}
static uint16_t atomic_trace_st_pre(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi)
static void atomic_trace_st_pre(CPUArchState *env, target_ulong addr,
MemOpIdx oi)
{
uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), true);
trace_guest_mem_before_exec(env_cpu(env), addr, info);
return info;
trace_guest_st_before_exec(env_cpu(env), addr, oi);
}
static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
uint16_t info)
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
#endif
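
The hunks above change the atomic trace hooks from value-returning helpers (a uint16_t trace-info cookie threaded from pre to post) into void functions that take the MemOpIdx directly. A minimal sketch of the new bracketing shape, with stand-in types and stub trace points (nothing below is QEMU code):

#include <stdint.h>

typedef uint32_t MemOpIdx;   /* stand-in: packed MemOp plus mmu index */

static void trace_rmw_pre(uintptr_t addr, MemOpIdx oi)  { /* trace point */ }
static void trace_rmw_post(uintptr_t addr, MemOpIdx oi) { /* plugin callback */ }

/* An atomic exchange bracketed by the new-style hooks. */
static uint64_t xchg_sketch(uint64_t *haddr, uint64_t val,
                            uintptr_t addr, MemOpIdx oi)
{
    uint64_t ret;

    trace_rmw_pre(addr, oi);
    ret = __atomic_exchange_n(haddr, val, __ATOMIC_SEQ_CST);
    trace_rmw_post(addr, oi);
    return ret;
}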


@ -19,7 +19,6 @@
*/
#include "qemu/plugin.h"
#include "trace/mem.h"
#if DATA_SIZE == 16
# define SUFFIX o
@ -72,77 +71,77 @@
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
atomic_trace_rmw_pre(env, addr, oi);
#if DATA_SIZE == 16
ret = atomic16_cmpxchg(haddr, cmpv, newv);
#else
ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
#endif
ATOMIC_MMU_CLEANUP;
atomic_trace_rmw_post(env, addr, info);
atomic_trace_rmw_post(env, addr, oi);
return ret;
}
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ, retaddr);
DATA_TYPE val;
uint16_t info = atomic_trace_ld_pre(env, addr, oi);
atomic_trace_ld_pre(env, addr, oi);
val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
atomic_trace_ld_post(env, addr, info);
atomic_trace_ld_post(env, addr, oi);
return val;
}
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_WRITE, retaddr);
uint16_t info = atomic_trace_st_pre(env, addr, oi);
atomic_trace_st_pre(env, addr, oi);
atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_st_post(env, addr, info);
atomic_trace_st_post(env, addr, oi);
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
atomic_trace_rmw_pre(env, addr, oi);
ret = qatomic_xchg__nocheck(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_rmw_post(env, addr, info);
atomic_trace_rmw_post(env, addr, oi);
return ret;
}
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
atomic_trace_rmw_pre(env, addr, oi); \
ret = qatomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, info); \
atomic_trace_rmw_post(env, addr, oi); \
return ret; \
}
@ -167,12 +166,12 @@ GEN_ATOMIC_HELPER(xor_fetch)
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE cmp, old, new, val = xval; \
uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
atomic_trace_rmw_pre(env, addr, oi); \
smp_mb(); \
cmp = qatomic_read__nocheck(haddr); \
do { \
@ -180,7 +179,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \
} while (cmp != old); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, info); \
atomic_trace_rmw_post(env, addr, oi); \
return RET; \
}
@ -211,78 +210,78 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
atomic_trace_rmw_pre(env, addr, oi);
#if DATA_SIZE == 16
ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
#else
ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
#endif
ATOMIC_MMU_CLEANUP;
atomic_trace_rmw_post(env, addr, info);
atomic_trace_rmw_post(env, addr, oi);
return BSWAP(ret);
}
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ, retaddr);
DATA_TYPE val;
uint16_t info = atomic_trace_ld_pre(env, addr, oi);
atomic_trace_ld_pre(env, addr, oi);
val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
atomic_trace_ld_post(env, addr, info);
atomic_trace_ld_post(env, addr, oi);
return BSWAP(val);
}
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_WRITE, retaddr);
uint16_t info = atomic_trace_st_pre(env, addr, oi);
atomic_trace_st_pre(env, addr, oi);
val = BSWAP(val);
atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_st_post(env, addr, info);
atomic_trace_st_post(env, addr, oi);
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
ABI_TYPE ret;
uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
atomic_trace_rmw_pre(env, addr, oi);
ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
ATOMIC_MMU_CLEANUP;
atomic_trace_rmw_post(env, addr, info);
atomic_trace_rmw_post(env, addr, oi);
return BSWAP(ret);
}
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
atomic_trace_rmw_pre(env, addr, oi); \
ret = qatomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, info); \
atomic_trace_rmw_post(env, addr, oi); \
return BSWAP(ret); \
}
@ -304,12 +303,12 @@ GEN_ATOMIC_HELPER(xor_fetch)
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE ldo, ldn, old, new, val = xval; \
uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
atomic_trace_rmw_pre(env, addr, oi); \
smp_mb(); \
ldn = qatomic_read__nocheck(haddr); \
do { \
@ -317,7 +316,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
} while (ldo != ldn); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, info); \
atomic_trace_rmw_post(env, addr, oi); \
return RET; \
}


@ -20,6 +20,9 @@
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
#include "disas/disas.h"
@ -38,6 +41,7 @@
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"
#include "sysemu/tcg.h"
#include "exec/helper-proto.h"
#include "tb-hash.h"
#include "tb-context.h"
@ -383,6 +387,17 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
cc->set_pc(cpu, last_tb->pc);
}
}
/*
* If gdb single-step, and we haven't raised another exception,
* raise a debug exception. Single-step with another exception
* is handled in cpu_handle_exception.
*/
if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
cpu->exception_index = EXCP_DEBUG;
cpu_loop_exit(cpu);
}
return last_tb;
}
@ -451,6 +466,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
* memory.
*/
#ifndef CONFIG_SOFTMMU
clear_helper_retaddr();
tcg_debug_assert(!have_mmap_lock());
#endif
if (qemu_mutex_iothread_locked()) {
@ -460,7 +476,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
qemu_plugin_disable_mem_helpers(cpu);
}
/*
* As we start the exclusive region before codegen we must still
* be in the region if we longjump out of either the codegen or
@ -925,6 +940,7 @@ int cpu_exec(CPUState *cpu)
#endif
#ifndef CONFIG_SOFTMMU
clear_helper_retaddr();
tcg_debug_assert(!have_mmap_lock());
#endif
if (qemu_mutex_iothread_locked()) {
@ -1057,23 +1073,52 @@ void tcg_exec_unrealizefn(CPUState *cpu)
#ifndef CONFIG_USER_ONLY
void dump_drift_info(void)
void dump_drift_info(GString *buf)
{
if (!icount_enabled()) {
return;
}
qemu_printf("Host - Guest clock %"PRIi64" ms\n",
g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n",
(cpu_get_clock() - icount_get()) / SCALE_MS);
if (icount_align_option) {
qemu_printf("Max guest delay %"PRIi64" ms\n",
g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n",
-max_delay / SCALE_MS);
qemu_printf("Max guest advance %"PRIi64" ms\n",
g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n",
max_advance / SCALE_MS);
} else {
qemu_printf("Max guest delay NA\n");
qemu_printf("Max guest advance NA\n");
g_string_append_printf(buf, "Max guest delay NA\n");
g_string_append_printf(buf, "Max guest advance NA\n");
}
}
HumanReadableText *qmp_x_query_jit(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
if (!tcg_enabled()) {
error_setg(errp, "JIT information is only available with accel=tcg");
return NULL;
}
dump_exec_info(buf);
dump_drift_info(buf);
return human_readable_text_from_str(buf);
}
HumanReadableText *qmp_x_query_opcount(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
if (!tcg_enabled()) {
error_setg(errp, "Opcode count information is only available with accel=tcg");
return NULL;
}
dump_opcount_info(buf);
return human_readable_text_from_str(buf);
}
#endif /* !CONFIG_USER_ONLY */
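
The two qmp_x_query_* handlers added above share one shape: allocate a GString, refuse the query when TCG is not the active accelerator, fill the buffer, and wrap it for QMP. A minimal glib-only sketch of that shape (HumanReadableText and human_readable_text_from_str are QEMU's; both are stubbed here so the example is self-contained):

#include <glib.h>

typedef struct { char *human_readable_text; } HumanReadableText;

/* Stub of QEMU's helper: duplicate a GString's contents. */
static HumanReadableText *human_readable_text_from_str(GString *buf)
{
    HumanReadableText *t = g_new0(HumanReadableText, 1);
    t->human_readable_text = g_strdup(buf->str);
    return t;
}

static HumanReadableText *query_example(gboolean use_tcg, const char **errmsg)
{
    g_autoptr(GString) buf = g_string_new("");

    if (!use_tcg) {
        *errmsg = "information is only available with accel=tcg";
        return NULL;
    }
    g_string_append_printf(buf, "example line %d\n", 1);
    return human_readable_text_from_str(buf);
}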


@ -34,12 +34,12 @@
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "trace/mem.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
@ -1749,7 +1749,7 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
* @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
*/
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, int size, int prot,
MemOpIdx oi, int size, int prot,
uintptr_t retaddr)
{
size_t mmu_idx = get_mmuidx(oi);
@ -1840,6 +1840,25 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
/*
* Verify that we have passed the correct MemOp to the correct function.
*
* In the case of the helper_*_mmu functions, we will have done this by
* using the MemOp to look up the helper during code generation.
*
* In the case of the cpu_*_mmu functions, this is up to the caller.
* We could present one function to target code, and dispatch based on
* the MemOp, but so far we have worked hard to avoid an indirect function
* call along the memory path.
*/
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
assert(have == expected);
#endif
}
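
/*
 * Illustration (not part of this commit): with CONFIG_DEBUG_TCG enabled,
 * handing a mismatched MemOpIdx to a helper trips the assertion above, e.g.
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUW, mmu_idx);
 *     helper_ret_ldub_mmu(env, addr, oi, ra);  // have MO_LEUW, expected MO_UB
 */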
/*
* Load Helpers
*
@ -1850,7 +1869,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
*/
typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
MemOpIdx oi, uintptr_t retaddr);
static inline uint64_t QEMU_ALWAYS_INLINE
load_memop(const void *haddr, MemOp op)
@ -1876,7 +1895,7 @@ load_memop(const void *haddr, MemOp op)
}
static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
uintptr_t retaddr, MemOp op, bool code_read,
FullLoadHelper *full_load)
{
@ -1991,79 +2010,86 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
*/
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_UB);
return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
}
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
return full_ldub_mmu(env, addr, oi, retaddr);
}
static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_LEUW);
return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
full_le_lduw_mmu);
}
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
return full_le_lduw_mmu(env, addr, oi, retaddr);
}
static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_BEUW);
return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
full_be_lduw_mmu);
}
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
return full_be_lduw_mmu(env, addr, oi, retaddr);
}
static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_LEUL);
return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
full_le_ldul_mmu);
}
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
return full_le_ldul_mmu(env, addr, oi, retaddr);
}
static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_BEUL);
return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
full_be_ldul_mmu);
}
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
return full_be_ldul_mmu(env, addr, oi, retaddr);
}
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_LEQ);
return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
helper_le_ldq_mmu);
}
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_BEQ);
return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
helper_be_ldq_mmu);
}
@ -2075,31 +2101,31 @@ uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
}
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
}
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
}
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
}
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}
@ -2109,193 +2135,56 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
*/
static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t retaddr,
MemOp op, FullLoadHelper *full_load)
MemOpIdx oi, uintptr_t retaddr,
FullLoadHelper *full_load)
{
uint16_t meminfo;
TCGMemOpIdx oi;
uint64_t ret;
meminfo = trace_mem_get_info(op, mmu_idx, false);
trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
op &= ~MO_SIGN;
oi = make_memop_idx(op, mmu_idx);
trace_guest_ld_before_exec(env_cpu(env), addr, oi);
ret = full_load(env, addr, oi, retaddr);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
{
return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu);
return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
}
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB,
full_ldub_mmu);
return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
}
uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu);
return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
}
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_BESW,
full_be_lduw_mmu);
return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu);
}
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu);
return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
}
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu);
return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
}
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUW, full_le_lduw_mmu);
}
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_LESW,
full_le_lduw_mmu);
}
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUL, full_le_ldul_mmu);
}
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEQ, helper_le_ldq_mmu);
}
uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
uintptr_t retaddr)
{
return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr,
uintptr_t retaddr)
{
return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr,
uintptr_t retaddr)
{
return cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr,
uintptr_t retaddr)
{
return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr,
uintptr_t retaddr)
{
return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr,
uintptr_t retaddr)
{
return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr,
uintptr_t retaddr)
{
return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
{
return cpu_ldub_data_ra(env, ptr, 0);
}
int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
{
return cpu_ldsb_data_ra(env, ptr, 0);
}
uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr)
{
return cpu_lduw_be_data_ra(env, ptr, 0);
}
int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr)
{
return cpu_ldsw_be_data_ra(env, ptr, 0);
}
uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr)
{
return cpu_ldl_be_data_ra(env, ptr, 0);
}
uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr)
{
return cpu_ldq_be_data_ra(env, ptr, 0);
}
uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr)
{
return cpu_lduw_le_data_ra(env, ptr, 0);
}
int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr)
{
return cpu_ldsw_le_data_ra(env, ptr, 0);
}
uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr)
{
return cpu_ldl_le_data_ra(env, ptr, 0);
}
uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr)
{
return cpu_ldq_le_data_ra(env, ptr, 0);
return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
}
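For orientation: the hunk above swaps the old (mmu_idx, MemOp) argument pair for a single packed MemOpIdx. A minimal sketch of a caller of the new load interface, assuming nothing beyond standard QEMU APIs (make_memop_idx(), cpu_mmu_index(), GETPC()):

/* Sketch: load a little-endian 32-bit value through the new interface. */
MemOpIdx oi = make_memop_idx(MO_LEUL, cpu_mmu_index(env, false));
uint32_t val = cpu_ldl_le_mmu(env, addr, oi, GETPC());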
/*
@ -2332,6 +2221,9 @@ store_memop(void *haddr, uint64_t val, MemOp op)
}
}
static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr);
static void __attribute__((noinline))
store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
@ -2341,7 +2233,7 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
uintptr_t index, index2;
CPUTLBEntry *entry, *entry2;
target_ulong page2, tlb_addr, tlb_addr2;
TCGMemOpIdx oi;
MemOpIdx oi;
size_t size2;
int i;
@ -2395,20 +2287,20 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
for (i = 0; i < size; ++i) {
/* Big-endian extract. */
uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
full_stb_mmu(env, addr + i, val8, oi, retaddr);
}
} else {
for (i = 0; i < size; ++i) {
/* Little-endian extract. */
uint8_t val8 = val >> (i * 8);
helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
full_stb_mmu(env, addr + i, val8, oi, retaddr);
}
}
}
static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
MemOpIdx oi, uintptr_t retaddr, MemOp op)
{
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
@ -2504,46 +2396,83 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
store_memop(haddr, val, op);
}
void __attribute__((noinline))
helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
static void __attribute__((noinline))
full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_UB);
store_helper(env, addr, val, oi, retaddr, MO_UB);
}
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
MemOpIdx oi, uintptr_t retaddr)
{
full_stb_mmu(env, addr, val, oi, retaddr);
}
static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_LEUW);
store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
MemOpIdx oi, uintptr_t retaddr)
{
full_le_stw_mmu(env, addr, val, oi, retaddr);
}
static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_BEUW);
store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
MemOpIdx oi, uintptr_t retaddr)
{
full_be_stw_mmu(env, addr, val, oi, retaddr);
}
static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_LEUL);
store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
full_le_stl_mmu(env, addr, val, oi, retaddr);
}
static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_BEUL);
store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
full_be_stl_mmu(env, addr, val, oi, retaddr);
}
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_LEQ);
store_helper(env, addr, val, oi, retaddr, MO_LEQ);
}
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
validate_memop(oi, MO_BEQ);
store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}
@ -2551,140 +2480,61 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
* Store Helpers for cpu_ldst.h
*/
static inline void QEMU_ALWAYS_INLINE
cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
int mmu_idx, uintptr_t retaddr, MemOp op)
typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
uint64_t val, MemOpIdx oi, uintptr_t retaddr);
static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
uint64_t val, MemOpIdx oi, uintptr_t ra,
FullStoreHelper *full_store)
{
TCGMemOpIdx oi;
uint16_t meminfo;
meminfo = trace_mem_get_info(op, mmu_idx, true);
trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
oi = make_memop_idx(op, mmu_idx);
store_helper(env, addr, val, oi, retaddr, op);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
trace_guest_st_before_exec(env_cpu(env), addr, oi);
full_store(env, addr, val, oi, ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
int mmu_idx, uintptr_t retaddr)
void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
MemOpIdx oi, uintptr_t retaddr)
{
cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
}
void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
int mmu_idx, uintptr_t retaddr)
void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
MemOpIdx oi, uintptr_t retaddr)
{
cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW);
cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
}
void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
int mmu_idx, uintptr_t retaddr)
void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL);
cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
}
void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
int mmu_idx, uintptr_t retaddr)
void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEQ);
cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
}
void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
int mmu_idx, uintptr_t retaddr)
void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
MemOpIdx oi, uintptr_t retaddr)
{
cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW);
cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
}
void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
int mmu_idx, uintptr_t retaddr)
void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL);
cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
}
void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
int mmu_idx, uintptr_t retaddr)
void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ);
cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
}
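The store side mirrors the loads: each cpu_st*_mmu wrapper hands its full_*_mmu worker to cpu_store_helper, so the trace and plugin hooks fire exactly once per access. A hedged usage sketch under the same standard-API assumptions as above:

/* Sketch: store a big-endian 16-bit value with an explicit MemOpIdx. */
MemOpIdx oi = make_memop_idx(MO_BEUW, cpu_mmu_index(env, false));
cpu_stw_be_mmu(env, addr, val, oi, GETPC());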
void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
uint32_t val, uintptr_t retaddr)
{
cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr,
uint32_t val, uintptr_t retaddr)
{
cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr,
uint32_t val, uintptr_t retaddr)
{
cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr,
uint64_t val, uintptr_t retaddr)
{
cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr,
uint32_t val, uintptr_t retaddr)
{
cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr,
uint32_t val, uintptr_t retaddr)
{
cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr,
uint64_t val, uintptr_t retaddr)
{
cpu_stq_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
cpu_stb_data_ra(env, ptr, val, 0);
}
void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
cpu_stw_be_data_ra(env, ptr, val, 0);
}
void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
cpu_stl_be_data_ra(env, ptr, val, 0);
}
void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val)
{
cpu_stq_be_data_ra(env, ptr, val, 0);
}
void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
cpu_stw_le_data_ra(env, ptr, val, 0);
}
void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
cpu_stl_le_data_ra(env, ptr, val, 0);
}
void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
{
cpu_stq_le_data_ra(env, ptr, val, 0);
}
#include "ldst_common.c.inc"
/*
* First set of functions passes in OI and RETADDR.
@ -2721,49 +2571,49 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
/* Code access functions. */
static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
return full_ldub_code(env, addr, oi, 0);
}
static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
return full_lduw_code(env, addr, oi, 0);
}
static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
return full_ldl_code(env, addr, oi, 0);
}
static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
}
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
MemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
return full_ldq_code(env, addr, oi, 0);
}

View File

@ -1,29 +1,15 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "exec/exec-all.h"
#include "monitor/monitor.h"
#include "sysemu/tcg.h"
static void hmp_info_jit(Monitor *mon, const QDict *qdict)
{
if (!tcg_enabled()) {
error_report("JIT information is only available with accel=tcg");
return;
}
dump_exec_info();
dump_drift_info();
}
static void hmp_info_opcount(Monitor *mon, const QDict *qdict)
{
dump_opcount_info();
}
static void hmp_tcg_register(void)
{
monitor_register_hmp("jit", true, hmp_info_jit);
monitor_register_hmp("opcount", true, hmp_info_opcount);
monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);
monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount);
}
type_init(hmp_tcg_register);
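monitor_register_hmp_info_hrt() expects a QMP handler returning HumanReadableText instead of a callback that prints to the Monitor. A sketch of what qmp_x_query_jit() can look like (the handler name comes from the diff; the body is an assumption built from the old hmp_info_jit and the GString-based dump_exec_info()/dump_drift_info() seen later in this commit):

HumanReadableText *qmp_x_query_jit(Error **errp)
{
    g_autoptr(GString) buf = g_string_new("");

    if (!tcg_enabled()) {
        error_setg(errp, "JIT information is only available with accel=tcg");
        return NULL;
    }

    dump_exec_info(buf);
    dump_drift_info(buf);
    return human_readable_text_from_str(buf);
}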

307
accel/tcg/ldst_common.c.inc Normal file
View File

@ -0,0 +1,307 @@
/*
* Routines common to user and system emulation of load/store.
*
* Copyright (c) 2003 Fabrice Bellard
*
* SPDX-License-Identifier: GPL-2.0-or-later
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
return cpu_ldb_mmu(env, addr, oi, ra);
}
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
}
uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
return cpu_ldw_be_mmu(env, addr, oi, ra);
}
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
}
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
return cpu_ldl_be_mmu(env, addr, oi, ra);
}
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx);
return cpu_ldq_be_mmu(env, addr, oi, ra);
}
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
return cpu_ldw_le_mmu(env, addr, oi, ra);
}
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
}
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
return cpu_ldl_le_mmu(env, addr, oi, ra);
}
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx);
return cpu_ldq_le_mmu(env, addr, oi, ra);
}
void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
cpu_stb_mmu(env, addr, val, oi, ra);
}
void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
cpu_stw_be_mmu(env, addr, val, oi, ra);
}
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
cpu_stl_be_mmu(env, addr, val, oi, ra);
}
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx);
cpu_stq_be_mmu(env, addr, val, oi, ra);
}
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
cpu_stw_le_mmu(env, addr, val, oi, ra);
}
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
cpu_stl_le_mmu(env, addr, val, oi, ra);
}
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx);
cpu_stq_le_mmu(env, addr, val, oi, ra);
}
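With this file in place, the _mmuidx_ra layer is pure packing: each wrapper builds a MemOpIdx (unaligned by default, preserving the old semantics) and defers to the matching _mmu entry point. The two calls below are therefore equivalent (a sketch, not code from the diff):

/* Equivalent under the wrappers above: */
cpu_stl_le_mmuidx_ra(env, addr, val, mmu_idx, ra);
cpu_stl_le_mmu(env, addr, val,
               make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx), ra);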
/*--------------------------*/
uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_ldub_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
}
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return (int8_t)cpu_ldub_data_ra(env, addr, ra);
}
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_lduw_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
}
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return (int16_t)cpu_lduw_be_data_ra(env, addr, ra);
}
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_ldl_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
}
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_ldq_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
}
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_lduw_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
}
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return (int16_t)cpu_lduw_le_data_ra(env, addr, ra);
}
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_ldl_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
}
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_ldq_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
}
void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, uintptr_t ra)
{
cpu_stb_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
}
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, uintptr_t ra)
{
cpu_stw_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
}
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, uintptr_t ra)
{
cpu_stl_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
}
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr,
uint64_t val, uintptr_t ra)
{
cpu_stq_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
}
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, uintptr_t ra)
{
cpu_stw_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
}
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, uintptr_t ra)
{
cpu_stl_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
}
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr,
uint64_t val, uintptr_t ra)
{
cpu_stq_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
}
/*--------------------------*/
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr addr)
{
return cpu_ldub_data_ra(env, addr, 0);
}
int cpu_ldsb_data(CPUArchState *env, abi_ptr addr)
{
return (int8_t)cpu_ldub_data(env, addr);
}
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr addr)
{
return cpu_lduw_be_data_ra(env, addr, 0);
}
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr)
{
return (int16_t)cpu_lduw_be_data(env, addr);
}
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr addr)
{
return cpu_ldl_be_data_ra(env, addr, 0);
}
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr addr)
{
return cpu_ldq_be_data_ra(env, addr, 0);
}
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr addr)
{
return cpu_lduw_le_data_ra(env, addr, 0);
}
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr)
{
return (int16_t)cpu_lduw_le_data(env, addr);
}
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr addr)
{
return cpu_ldl_le_data_ra(env, addr, 0);
}
uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr addr)
{
return cpu_ldq_le_data_ra(env, addr, 0);
}
void cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val)
{
cpu_stb_data_ra(env, addr, val, 0);
}
void cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
{
cpu_stw_be_data_ra(env, addr, val, 0);
}
void cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
{
cpu_stl_be_data_ra(env, addr, val, 0);
}
void cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val)
{
cpu_stq_be_data_ra(env, addr, val, 0);
}
void cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
{
cpu_stw_le_data_ra(env, addr, val, 0);
}
void cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
{
cpu_stl_le_data_ra(env, addr, val, 0);
}
void cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val)
{
cpu_stq_le_data_ra(env, addr, val, 0);
}
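The outermost layers only fill in defaults: _data_ra picks the current mmu index, and _data additionally passes ra = 0 (meaning "not called from a TCG helper"). As a sketch, outside generated code these three reads are equivalent:

uint32_t a = cpu_ldl_le_data(env, addr);
uint32_t b = cpu_ldl_le_data_ra(env, addr, 0);
uint32_t c = cpu_ldl_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), 0);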

View File

@ -45,7 +45,6 @@
#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "trace/mem.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"
@ -163,11 +162,7 @@ static void gen_empty_mem_helper(void)
static void gen_plugin_cb_start(enum plugin_gen_from from,
enum plugin_gen_cb type, unsigned wr)
{
TCGOp *op;
tcg_gen_plugin_cb_start(from, type, wr);
op = tcg_last_op();
QSIMPLEQ_INSERT_TAIL(&tcg_ctx->plugin_ops, op, plugin_link);
}
static void gen_wrapped(enum plugin_gen_from from,
@ -211,9 +206,9 @@ static void gen_mem_wrapped(enum plugin_gen_cb type,
const union mem_gen_fn *f, TCGv addr,
uint32_t info, bool is_mem)
{
int wr = !!(info & TRACE_MEM_ST);
enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);
gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, wr);
gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw);
if (is_mem) {
f->mem_fn(addr, info);
} else {
@ -707,62 +702,6 @@ static void plugin_gen_disable_mem_helper(const struct qemu_plugin_tb *ptb,
inject_mem_disable_helper(insn, begin_op);
}
static void plugin_inject_cb(const struct qemu_plugin_tb *ptb, TCGOp *begin_op,
int insn_idx)
{
enum plugin_gen_from from = begin_op->args[0];
enum plugin_gen_cb type = begin_op->args[1];
switch (from) {
case PLUGIN_GEN_FROM_TB:
switch (type) {
case PLUGIN_GEN_CB_UDATA:
plugin_gen_tb_udata(ptb, begin_op);
return;
case PLUGIN_GEN_CB_INLINE:
plugin_gen_tb_inline(ptb, begin_op);
return;
default:
g_assert_not_reached();
}
case PLUGIN_GEN_FROM_INSN:
switch (type) {
case PLUGIN_GEN_CB_UDATA:
plugin_gen_insn_udata(ptb, begin_op, insn_idx);
return;
case PLUGIN_GEN_CB_INLINE:
plugin_gen_insn_inline(ptb, begin_op, insn_idx);
return;
case PLUGIN_GEN_ENABLE_MEM_HELPER:
plugin_gen_enable_mem_helper(ptb, begin_op, insn_idx);
return;
default:
g_assert_not_reached();
}
case PLUGIN_GEN_FROM_MEM:
switch (type) {
case PLUGIN_GEN_CB_MEM:
plugin_gen_mem_regular(ptb, begin_op, insn_idx);
return;
case PLUGIN_GEN_CB_INLINE:
plugin_gen_mem_inline(ptb, begin_op, insn_idx);
return;
default:
g_assert_not_reached();
}
case PLUGIN_GEN_AFTER_INSN:
switch (type) {
case PLUGIN_GEN_DISABLE_MEM_HELPER:
plugin_gen_disable_mem_helper(ptb, begin_op, insn_idx);
return;
default:
g_assert_not_reached();
}
default:
g_assert_not_reached();
}
}
/* #define DEBUG_PLUGIN_GEN_OPS */
static void pr_ops(void)
{
@ -820,21 +759,95 @@ static void pr_ops(void)
static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb)
{
TCGOp *op;
int insn_idx;
int insn_idx = -1;
pr_ops();
insn_idx = -1;
QSIMPLEQ_FOREACH(op, &tcg_ctx->plugin_ops, plugin_link) {
QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
switch (op->opc) {
case INDEX_op_insn_start:
insn_idx++;
break;
case INDEX_op_plugin_cb_start:
{
enum plugin_gen_from from = op->args[0];
enum plugin_gen_cb type = op->args[1];
tcg_debug_assert(op->opc == INDEX_op_plugin_cb_start);
/* ENABLE_MEM_HELPER is the first callback of an instruction */
if (from == PLUGIN_GEN_FROM_INSN &&
type == PLUGIN_GEN_ENABLE_MEM_HELPER) {
insn_idx++;
switch (from) {
case PLUGIN_GEN_FROM_TB:
{
g_assert(insn_idx == -1);
switch (type) {
case PLUGIN_GEN_CB_UDATA:
plugin_gen_tb_udata(plugin_tb, op);
break;
case PLUGIN_GEN_CB_INLINE:
plugin_gen_tb_inline(plugin_tb, op);
break;
default:
g_assert_not_reached();
}
break;
}
case PLUGIN_GEN_FROM_INSN:
{
g_assert(insn_idx >= 0);
switch (type) {
case PLUGIN_GEN_CB_UDATA:
plugin_gen_insn_udata(plugin_tb, op, insn_idx);
break;
case PLUGIN_GEN_CB_INLINE:
plugin_gen_insn_inline(plugin_tb, op, insn_idx);
break;
case PLUGIN_GEN_ENABLE_MEM_HELPER:
plugin_gen_enable_mem_helper(plugin_tb, op, insn_idx);
break;
default:
g_assert_not_reached();
}
break;
}
case PLUGIN_GEN_FROM_MEM:
{
g_assert(insn_idx >= 0);
switch (type) {
case PLUGIN_GEN_CB_MEM:
plugin_gen_mem_regular(plugin_tb, op, insn_idx);
break;
case PLUGIN_GEN_CB_INLINE:
plugin_gen_mem_inline(plugin_tb, op, insn_idx);
break;
default:
g_assert_not_reached();
}
break;
}
case PLUGIN_GEN_AFTER_INSN:
{
g_assert(insn_idx >= 0);
switch (type) {
case PLUGIN_GEN_DISABLE_MEM_HELPER:
plugin_gen_disable_mem_helper(plugin_tb, op, insn_idx);
break;
default:
g_assert_not_reached();
}
break;
}
default:
g_assert_not_reached();
}
break;
}
default:
/* plugins don't care about any other ops */
break;
}
plugin_inject_cb(plugin_tb, op, insn_idx);
}
pr_ops();
}
@ -847,7 +860,6 @@ bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_onl
if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) {
ret = true;
QSIMPLEQ_INIT(&tcg_ctx->plugin_ops);
ptb->vaddr = tb->pc;
ptb->vaddr2 = -1;
get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1);
@ -864,9 +876,8 @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
struct qemu_plugin_insn *pinsn;
pinsn = qemu_plugin_tb_insn_get(ptb);
pinsn = qemu_plugin_tb_insn_get(ptb, db->pc_next);
tcg_ctx->plugin_insn = pinsn;
pinsn->vaddr = db->pc_next;
plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);
/*

View File

@ -28,6 +28,7 @@
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "qemu/main-loop.h"
#include "qemu/notify.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
#include "hw/boards.h"
@ -35,6 +36,26 @@
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h"
typedef struct MttcgForceRcuNotifier {
Notifier notifier;
CPUState *cpu;
} MttcgForceRcuNotifier;
static void do_nothing(CPUState *cpu, run_on_cpu_data d)
{
}
static void mttcg_force_rcu(Notifier *notify, void *data)
{
CPUState *cpu = container_of(notify, MttcgForceRcuNotifier, notifier)->cpu;
/*
* Called with rcu_registry_lock held; using async_run_on_cpu() ensures
* that there are no deadlocks.
*/
async_run_on_cpu(cpu, do_nothing, RUN_ON_CPU_NULL);
}
/*
* In the multi-threaded case each vCPU has its own thread. The TLS
* variable current_cpu can be used deep in the code to find the
@ -43,12 +64,16 @@
static void *mttcg_cpu_thread_fn(void *arg)
{
MttcgForceRcuNotifier force_rcu;
CPUState *cpu = arg;
assert(tcg_enabled());
g_assert(!icount_enabled());
rcu_register_thread();
force_rcu.notifier.notify = mttcg_force_rcu;
force_rcu.cpu = cpu;
rcu_add_force_rcu_notifier(&force_rcu.notifier);
tcg_register_thread();
qemu_mutex_lock_iothread();
@ -100,6 +125,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
tcg_cpus_destroy(cpu);
qemu_mutex_unlock_iothread();
rcu_remove_force_rcu_notifier(&force_rcu.notifier);
rcu_unregister_thread();
return NULL;
}

View File

@ -28,6 +28,7 @@
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "qemu/main-loop.h"
#include "qemu/notify.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
@ -133,6 +134,11 @@ static void rr_deal_with_unplugged_cpus(void)
}
}
static void rr_force_rcu(Notifier *notify, void *data)
{
rr_kick_next_cpu();
}
/*
* In the single-threaded case each vCPU is simulated in turn. If
* there is more than a single vCPU we create a simple timer to kick
@ -143,10 +149,13 @@ static void rr_deal_with_unplugged_cpus(void)
static void *rr_cpu_thread_fn(void *arg)
{
Notifier force_rcu;
CPUState *cpu = arg;
assert(tcg_enabled());
rcu_register_thread();
force_rcu.notify = rr_force_rcu;
rcu_add_force_rcu_notifier(&force_rcu);
tcg_register_thread();
qemu_mutex_lock_iothread();
@ -255,6 +264,7 @@ static void *rr_cpu_thread_fn(void *arg)
rr_deal_with_unplugged_cpus();
}
rcu_remove_force_rcu_notifier(&force_rcu);
rcu_unregister_thread();
return NULL;
}

View File

@ -2424,7 +2424,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
cpu_loop_exit_noexc(cpu);
}
static void print_qht_statistics(struct qht_stats hst)
static void print_qht_statistics(struct qht_stats hst, GString *buf)
{
uint32_t hgram_opts;
size_t hgram_bins;
@ -2433,9 +2433,11 @@ static void print_qht_statistics(struct qht_stats hst)
if (!hst.head_buckets) {
return;
}
qemu_printf("TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
g_string_append_printf(buf, "TB hash buckets %zu/%zu "
"(%0.2f%% head buckets used)\n",
hst.used_head_buckets, hst.head_buckets,
(double)hst.used_head_buckets / hst.head_buckets * 100);
(double)hst.used_head_buckets /
hst.head_buckets * 100);
hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
@ -2443,7 +2445,8 @@ static void print_qht_statistics(struct qht_stats hst)
hgram_opts |= QDIST_PR_NODECIMAL;
}
hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
qemu_printf("TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
g_string_append_printf(buf, "TB hash occupancy %0.2f%% avg chain occ. "
"Histogram: %s\n",
qdist_avg(&hst.occupancy) * 100, hgram);
g_free(hgram);
@ -2456,7 +2459,8 @@ static void print_qht_statistics(struct qht_stats hst)
hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
}
hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
qemu_printf("TB hash avg chain %0.3f buckets. Histogram: %s\n",
g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. "
"Histogram: %s\n",
qdist_avg(&hst.chain), hgram);
g_free(hgram);
}
@ -2494,7 +2498,7 @@ static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
return false;
}
void dump_exec_info(void)
void dump_exec_info(GString *buf)
{
struct tb_tree_stats tst = {};
struct qht_stats hst;
@ -2503,49 +2507,53 @@ void dump_exec_info(void)
tcg_tb_foreach(tb_tree_stats_iter, &tst);
nb_tbs = tst.nb_tbs;
/* XXX: avoid using doubles ? */
qemu_printf("Translation buffer state:\n");
g_string_append_printf(buf, "Translation buffer state:\n");
/*
* Report total code size including the padding and TB structs;
* otherwise users might think "-accel tcg,tb-size" is not honoured.
* For avg host size we use the precise numbers from tb_tree_stats though.
*/
qemu_printf("gen code size %zu/%zu\n",
g_string_append_printf(buf, "gen code size %zu/%zu\n",
tcg_code_size(), tcg_code_capacity());
qemu_printf("TB count %zu\n", nb_tbs);
qemu_printf("TB avg target size %zu max=%zu bytes\n",
g_string_append_printf(buf, "TB count %zu\n", nb_tbs);
g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n",
nb_tbs ? tst.target_size / nb_tbs : 0,
tst.max_target_size);
qemu_printf("TB avg host size %zu bytes (expansion ratio: %0.1f)\n",
g_string_append_printf(buf, "TB avg host size %zu bytes "
"(expansion ratio: %0.1f)\n",
nb_tbs ? tst.host_size / nb_tbs : 0,
tst.target_size ? (double)tst.host_size / tst.target_size : 0);
qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
tst.target_size ?
(double)tst.host_size / tst.target_size : 0);
g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n",
tst.cross_page,
nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
qemu_printf("direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
g_string_append_printf(buf, "direct jump count %zu (%zu%%) "
"(2 jumps=%zu %zu%%)\n",
tst.direct_jmp_count,
nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
tst.direct_jmp2_count,
nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
qht_statistics_init(&tb_ctx.htable, &hst);
print_qht_statistics(hst);
print_qht_statistics(hst, buf);
qht_statistics_destroy(&hst);
qemu_printf("\nStatistics:\n");
qemu_printf("TB flush count %u\n",
g_string_append_printf(buf, "\nStatistics:\n");
g_string_append_printf(buf, "TB flush count %u\n",
qatomic_read(&tb_ctx.tb_flush_count));
qemu_printf("TB invalidate count %u\n",
g_string_append_printf(buf, "TB invalidate count %u\n",
qatomic_read(&tb_ctx.tb_phys_invalidate_count));
tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
qemu_printf("TLB full flushes %zu\n", flush_full);
qemu_printf("TLB partial flushes %zu\n", flush_part);
qemu_printf("TLB elided flushes %zu\n", flush_elide);
tcg_dump_info();
g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full);
g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide);
tcg_dump_info(buf);
}
void dump_opcount_info(void)
void dump_opcount_info(GString *buf)
{
tcg_dump_op_count();
tcg_dump_op_count(buf);
}
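Since the printing now goes through g_string_append_printf(), callers own the buffer and choose the destination. A hedged sketch of a consumer (monitor_printf() is standard QEMU; the surrounding monitor context is an assumption):

g_autoptr(GString) buf = g_string_new("");

dump_exec_info(buf);                  /* appends, never prints */
monitor_printf(mon, "%s", buf->str);  /* caller picks where it goes */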
#else /* CONFIG_USER_ONLY */

View File

@ -215,7 +215,7 @@ static inline void translator_maybe_page_protect(DisasContextBase *dcbase,
if (do_swap) { \
ret = swap_fn(ret); \
} \
plugin_insn_append(&ret, sizeof(ret)); \
plugin_insn_append(pc, &ret, sizeof(ret)); \
return ret; \
}

File diff suppressed because it is too large

View File

@ -7,23 +7,22 @@ softmmu_ss.add(files(
'wavcapture.c',
))
softmmu_ss.add(when: [coreaudio, 'CONFIG_AUDIO_COREAUDIO'], if_true: files('coreaudio.c'))
softmmu_ss.add(when: [dsound, 'CONFIG_AUDIO_DSOUND'], if_true: files('dsoundaudio.c'))
softmmu_ss.add(when: ['CONFIG_AUDIO_WIN_INT'], if_true: files('audio_win_int.c'))
softmmu_ss.add(when: coreaudio, if_true: files('coreaudio.c'))
softmmu_ss.add(when: dsound, if_true: files('dsoundaudio.c', 'audio_win_int.c'))
audio_modules = {}
foreach m : [
['CONFIG_AUDIO_ALSA', 'alsa', alsa, 'alsaaudio.c'],
['CONFIG_AUDIO_OSS', 'oss', oss, 'ossaudio.c'],
['CONFIG_AUDIO_PA', 'pa', pulse, 'paaudio.c'],
['CONFIG_AUDIO_SDL', 'sdl', sdl, 'sdlaudio.c'],
['CONFIG_AUDIO_JACK', 'jack', jack, 'jackaudio.c'],
['CONFIG_SPICE', 'spice', spice, 'spiceaudio.c']
['alsa', alsa, files('alsaaudio.c')],
['oss', oss, files('ossaudio.c')],
['pa', pulse, files('paaudio.c')],
['sdl', sdl, files('sdlaudio.c')],
['jack', jack, files('jackaudio.c')],
['spice', spice, files('spiceaudio.c')]
]
if config_host.has_key(m[0])
if m[1].found()
module_ss = ss.source_set()
module_ss.add(when: m[2], if_true: files(m[3]))
audio_modules += {m[1] : module_ss}
module_ss.add(m[1], m[2])
audio_modules += {m[0] : module_ss}
endif
endforeach

82
backends/hostmem-epc.c Normal file
View File

@ -0,0 +1,82 @@
/*
* QEMU host SGX EPC memory backend
*
* Copyright (C) 2019 Intel Corporation
*
* Authors:
* Sean Christopherson <sean.j.christopherson@intel.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include <sys/ioctl.h>
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qom/object_interfaces.h"
#include "qapi/error.h"
#include "sysemu/hostmem.h"
#include "hw/i386/hostmem-epc.h"
static void
sgx_epc_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
{
uint32_t ram_flags;
char *name;
int fd;
if (!backend->size) {
error_setg(errp, "can't create backend with size 0");
return;
}
fd = qemu_open_old("/dev/sgx_vepc", O_RDWR);
if (fd < 0) {
error_setg_errno(errp, errno,
"failed to open /dev/sgx_vepc to alloc SGX EPC");
return;
}
name = object_get_canonical_path(OBJECT(backend));
ram_flags = (backend->share ? RAM_SHARED : 0) | RAM_PROTECTED;
memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend),
name, backend->size, ram_flags,
fd, 0, errp);
g_free(name);
}
static void sgx_epc_backend_instance_init(Object *obj)
{
HostMemoryBackend *m = MEMORY_BACKEND(obj);
m->share = true;
m->merge = false;
m->dump = false;
}
static void sgx_epc_backend_class_init(ObjectClass *oc, void *data)
{
HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc);
bc->alloc = sgx_epc_backend_memory_alloc;
}
static const TypeInfo sgx_epc_backed_info = {
.name = TYPE_MEMORY_BACKEND_EPC,
.parent = TYPE_MEMORY_BACKEND,
.instance_init = sgx_epc_backend_instance_init,
.class_init = sgx_epc_backend_class_init,
.instance_size = sizeof(HostMemoryBackendEpc),
};
static void register_types(void)
{
int fd = qemu_open_old("/dev/sgx_vepc", O_RDWR);
if (fd >= 0) {
close(fd);
type_register_static(&sgx_epc_backed_info);
}
}
type_init(register_types);
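Registration is gated on /dev/sgx_vepc being openable, i.e. on SGX-capable hosts. Assuming such a host, the backend would be wired to a guest EPC section roughly like this (command-line sketch; the sgx-epc machine property is defined elsewhere in this merge):

qemu-system-x86_64 ... \
    -object memory-backend-epc,id=mem1,size=64M,prealloc=on \
    -M sgx-epc.0.memdev=mem1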

View File

@ -16,5 +16,6 @@ softmmu_ss.add(when: ['CONFIG_VHOST_USER', 'CONFIG_VIRTIO'], if_true: files('vho
softmmu_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost.c'))
softmmu_ss.add(when: ['CONFIG_VIRTIO_CRYPTO', 'CONFIG_VHOST_CRYPTO'], if_true: files('cryptodev-vhost-user.c'))
softmmu_ss.add(when: 'CONFIG_GIO', if_true: [files('dbus-vmstate.c'), gio])
softmmu_ss.add(when: 'CONFIG_SGX', if_true: files('hostmem-epc.c'))
subdir('tpm')

89
block.c
View File

@ -84,6 +84,9 @@ static BlockDriverState *bdrv_open_inherit(const char *filename,
BdrvChildRole child_role,
Error **errp);
static bool bdrv_recurse_has_child(BlockDriverState *bs,
BlockDriverState *child);
static void bdrv_replace_child_noperm(BdrvChild *child,
BlockDriverState *new_bs);
static void bdrv_remove_file_or_backing_child(BlockDriverState *bs,
@ -1604,16 +1607,26 @@ open_failed:
return ret;
}
BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name,
int flags, Error **errp)
/*
* Create and open a block node.
*
* @options is a QDict of options to pass to the block drivers, or NULL for an
* empty set of options. The reference to the QDict belongs to the block layer
* after the call (even on failure), so if the caller intends to reuse the
* dictionary, it needs to use qobject_ref() before making the call.
*/
BlockDriverState *bdrv_new_open_driver_opts(BlockDriver *drv,
const char *node_name,
QDict *options, int flags,
Error **errp)
{
BlockDriverState *bs;
int ret;
bs = bdrv_new();
bs->open_flags = flags;
bs->explicit_options = qdict_new();
bs->options = qdict_new();
bs->options = options ?: qdict_new();
bs->explicit_options = qdict_clone_shallow(bs->options);
bs->opaque = NULL;
update_options_from_flags(bs->options, flags);
@ -1631,6 +1644,13 @@ BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name,
return bs;
}
/* Create and open a block node. */
BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name,
int flags, Error **errp)
{
return bdrv_new_open_driver_opts(drv, node_name, NULL, flags, errp);
}
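The ownership rule in the comment above bites in practice: the QDict is consumed even on failure. A sketch of a caller that wants to keep its options afterwards (qdict_put_str() and qobject_ref() are standard QObject APIs; the option name, node name, and surrounding drv/errp variables are assumptions):

QDict *opts = qdict_new();
qdict_put_str(opts, "x-some-option", "value");   /* hypothetical option */

qobject_ref(opts);  /* keep one reference: the call below consumes one */
BlockDriverState *bs = bdrv_new_open_driver_opts(drv, "node0", opts,
                                                 BDRV_O_RDWR, errp);
/* opts is still usable here, on success or failure */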
QemuOptsList bdrv_runtime_opts = {
.name = "bdrv_common",
.head = QTAILQ_HEAD_INITIALIZER(bdrv_runtime_opts.head),
@ -2656,6 +2676,7 @@ static void bdrv_replace_child_noperm(BdrvChild *child,
int drain_saldo;
assert(!child->frozen);
assert(old_bs != new_bs);
if (old_bs && new_bs) {
assert(bdrv_get_aio_context(old_bs) == bdrv_get_aio_context(new_bs));
@ -2875,6 +2896,12 @@ static int bdrv_attach_child_noperm(BlockDriverState *parent_bs,
assert(parent_bs->drv);
if (bdrv_recurse_has_child(child_bs, parent_bs)) {
error_setg(errp, "Making '%s' a %s child of '%s' would create a cycle",
child_bs->node_name, child_name, parent_bs->node_name);
return -EINVAL;
}
bdrv_get_cumulative_perm(parent_bs, &perm, &shared_perm);
bdrv_child_perm(parent_bs, child_bs, NULL, child_role, NULL,
perm, shared_perm, &perm, &shared_perm);
@ -5102,29 +5129,61 @@ static void bdrv_delete(BlockDriverState *bs)
g_free(bs);
}
BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *node_options,
/*
* Replace @bs by a newly created block node.
*
* @options is a QDict of options to pass to the block drivers, or NULL for an
* empty set of options. The reference to the QDict belongs to the block layer
* after the call (even on failure), so if the caller intends to reuse the
* dictionary, it needs to use qobject_ref() before making the call.
*/
BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options,
int flags, Error **errp)
{
BlockDriverState *new_node_bs;
Error *local_err = NULL;
ERRP_GUARD();
int ret;
BlockDriverState *new_node_bs = NULL;
const char *drvname, *node_name;
BlockDriver *drv;
new_node_bs = bdrv_open(NULL, NULL, node_options, flags, errp);
if (new_node_bs == NULL) {
drvname = qdict_get_try_str(options, "driver");
if (!drvname) {
error_setg(errp, "driver is not specified");
goto fail;
}
drv = bdrv_find_format(drvname);
if (!drv) {
error_setg(errp, "Unknown driver: '%s'", drvname);
goto fail;
}
node_name = qdict_get_try_str(options, "node-name");
new_node_bs = bdrv_new_open_driver_opts(drv, node_name, options, flags,
errp);
options = NULL; /* bdrv_new_open_driver_opts() eats options */
if (!new_node_bs) {
error_prepend(errp, "Could not create node: ");
return NULL;
goto fail;
}
bdrv_drained_begin(bs);
bdrv_replace_node(bs, new_node_bs, &local_err);
ret = bdrv_replace_node(bs, new_node_bs, errp);
bdrv_drained_end(bs);
if (local_err) {
bdrv_unref(new_node_bs);
error_propagate(errp, local_err);
return NULL;
if (ret < 0) {
error_prepend(errp, "Could not replace node: ");
goto fail;
}
return new_node_bs;
fail:
qobject_unref(options);
bdrv_unref(new_node_bs);
return NULL;
}
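bdrv_insert_node() follows the same consume-on-call contract. A hedged sketch of inserting a filter node above @bs (driver name and options are hypothetical; the "file" child is given as a node-name reference, which the block layer resolves):

QDict *opts = qdict_new();
qdict_put_str(opts, "driver", "throttle");            /* hypothetical */
qdict_put_str(opts, "throttle-group", "tg0");         /* hypothetical */
qdict_put_str(opts, "file", bdrv_get_node_name(bs));

BlockDriverState *filter_bs =
    bdrv_insert_node(bs, opts, BDRV_O_RDWR, errp);    /* opts consumed */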
/*

View File

@ -98,6 +98,8 @@ AioTaskPool *coroutine_fn aio_task_pool_new(int max_busy_tasks)
{
AioTaskPool *pool = g_new0(AioTaskPool, 1);
assert(max_busy_tasks > 0);
pool->main_co = qemu_coroutine_self();
pool->max_busy_tasks = max_busy_tasks;

View File

@ -327,11 +327,12 @@ static void coroutine_fn backup_set_speed(BlockJob *job, int64_t speed)
}
}
static void backup_cancel(Job *job, bool force)
static bool backup_cancel(Job *job, bool force)
{
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
bdrv_cancel_in_flight(s->target_bs);
return true;
}
static const BlockJobDriver backup_job_driver = {
@ -407,8 +408,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
return NULL;
}
if (perf->max_workers < 1) {
error_setg(errp, "max-workers must be greater than zero");
if (perf->max_workers < 1 || perf->max_workers > INT_MAX) {
error_setg(errp, "max-workers must be between 1 and %d", INT_MAX);
return NULL;
}

View File

@ -631,8 +631,8 @@ static int rule_check(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
}
static int coroutine_fn
blkdebug_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
blkdebug_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
int err;
@ -652,8 +652,8 @@ blkdebug_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
}
static int coroutine_fn
blkdebug_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
blkdebug_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
int err;
@ -684,7 +684,7 @@ static int blkdebug_co_flush(BlockDriverState *bs)
}
static int coroutine_fn blkdebug_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes,
int64_t offset, int64_t bytes,
BdrvRequestFlags flags)
{
uint32_t align = MAX(bs->bl.request_alignment,
@ -717,7 +717,7 @@ static int coroutine_fn blkdebug_co_pwrite_zeroes(BlockDriverState *bs,
}
static int coroutine_fn blkdebug_co_pdiscard(BlockDriverState *bs,
int64_t offset, int bytes)
int64_t offset, int64_t bytes)
{
uint32_t align = bs->bl.pdiscard_alignment;
int err;

View File

@ -301,8 +301,8 @@ static void blk_log_writes_refresh_limits(BlockDriverState *bs, Error **errp)
}
static int coroutine_fn
blk_log_writes_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
blk_log_writes_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
}
@ -460,16 +460,16 @@ blk_log_writes_co_do_file_pdiscard(BlkLogWritesFileReq *fr)
}
static int coroutine_fn
blk_log_writes_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
blk_log_writes_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
return blk_log_writes_co_log(bs, offset, bytes, qiov, flags,
blk_log_writes_co_do_file_pwritev, 0, false);
}
static int coroutine_fn
blk_log_writes_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes,
BdrvRequestFlags flags)
blk_log_writes_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
int64_t bytes, BdrvRequestFlags flags)
{
return blk_log_writes_co_log(bs, offset, bytes, NULL, flags,
blk_log_writes_co_do_file_pwrite_zeroes, 0,
@ -484,9 +484,9 @@ static int coroutine_fn blk_log_writes_co_flush_to_disk(BlockDriverState *bs)
}
static int coroutine_fn
blk_log_writes_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
blk_log_writes_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
return blk_log_writes_co_log(bs, offset, count, NULL, 0,
return blk_log_writes_co_log(bs, offset, bytes, NULL, 0,
blk_log_writes_co_do_file_pdiscard,
LOG_DISCARD_FLAG, false);
}

View File

@ -72,7 +72,7 @@ static void block_request_create(uint64_t reqid, BlockDriverState *bs,
}
static int coroutine_fn blkreplay_co_preadv(BlockDriverState *bs,
uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
uint64_t reqid = blkreplay_next_id();
int ret = bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
@ -83,7 +83,7 @@ static int coroutine_fn blkreplay_co_preadv(BlockDriverState *bs,
}
static int coroutine_fn blkreplay_co_pwritev(BlockDriverState *bs,
uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
uint64_t reqid = blkreplay_next_id();
int ret = bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
@ -94,7 +94,7 @@ static int coroutine_fn blkreplay_co_pwritev(BlockDriverState *bs,
}
static int coroutine_fn blkreplay_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes, BdrvRequestFlags flags)
int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
uint64_t reqid = blkreplay_next_id();
int ret = bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags);
@ -105,7 +105,7 @@ static int coroutine_fn blkreplay_co_pwrite_zeroes(BlockDriverState *bs,
}
static int coroutine_fn blkreplay_co_pdiscard(BlockDriverState *bs,
int64_t offset, int bytes)
int64_t offset, int64_t bytes)
{
uint64_t reqid = blkreplay_next_id();
int ret = bdrv_co_pdiscard(bs->file, offset, bytes);

View File

@ -221,8 +221,8 @@ blkverify_co_prwv(BlockDriverState *bs, BlkverifyRequest *r, uint64_t offset,
}
static int coroutine_fn
blkverify_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
blkverify_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BlkverifyRequest r;
QEMUIOVector raw_qiov;
@ -250,8 +250,8 @@ blkverify_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
}
static int coroutine_fn
blkverify_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
blkverify_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BlkverifyRequest r;
return blkverify_co_prwv(bs, &r, offset, bytes, qiov, qiov, flags, true);

View File

@ -14,6 +14,7 @@
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/coroutines.h"
#include "block/throttle-groups.h"
#include "hw/qdev-core.h"
#include "sysemu/blockdev.h"
@ -1161,11 +1162,11 @@ void blk_set_disable_request_queuing(BlockBackend *blk, bool disable)
}
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
size_t size)
int64_t bytes)
{
int64_t len;
if (size > INT_MAX) {
if (bytes < 0) {
return -EIO;
}
@ -1183,7 +1184,7 @@ static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
return len;
}
if (offset > len || len - offset < size) {
if (offset > len || len - offset < bytes) {
return -EIO;
}
}
@ -1204,8 +1205,8 @@ static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn
blk_do_preadv(BlockBackend *blk, int64_t offset, unsigned int bytes,
int coroutine_fn
blk_co_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
int ret;
@ -1236,21 +1237,21 @@ blk_do_preadv(BlockBackend *blk, int64_t offset, unsigned int bytes,
}
int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
unsigned int bytes, QEMUIOVector *qiov,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
int ret;
blk_inc_in_flight(blk);
ret = blk_do_preadv(blk, offset, bytes, qiov, flags);
ret = blk_co_do_preadv(blk, offset, bytes, qiov, flags);
blk_dec_in_flight(blk);
return ret;
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn
blk_do_pwritev_part(BlockBackend *blk, int64_t offset, unsigned int bytes,
int coroutine_fn
blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags)
{
@ -1286,7 +1287,28 @@ blk_do_pwritev_part(BlockBackend *blk, int64_t offset, unsigned int bytes,
}
int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
unsigned int bytes,
int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags)
{
int ret;
blk_inc_in_flight(blk);
ret = blk_co_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags);
blk_dec_in_flight(blk);
return ret;
}
int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags);
}
static int coroutine_fn blk_pwritev_part(BlockBackend *blk, int64_t offset,
int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags)
{
@ -1299,13 +1321,6 @@ int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
return ret;
}
int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags);
}
typedef struct BlkRwCo {
BlockBackend *blk;
int64_t offset;
@ -1314,57 +1329,10 @@ typedef struct BlkRwCo {
BdrvRequestFlags flags;
} BlkRwCo;
static void blk_read_entry(void *opaque)
{
BlkRwCo *rwco = opaque;
QEMUIOVector *qiov = rwco->iobuf;
rwco->ret = blk_do_preadv(rwco->blk, rwco->offset, qiov->size,
qiov, rwco->flags);
aio_wait_kick();
}
static void blk_write_entry(void *opaque)
{
BlkRwCo *rwco = opaque;
QEMUIOVector *qiov = rwco->iobuf;
rwco->ret = blk_do_pwritev_part(rwco->blk, rwco->offset, qiov->size,
qiov, 0, rwco->flags);
aio_wait_kick();
}
static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
int64_t bytes, CoroutineEntry co_entry,
BdrvRequestFlags flags)
{
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
BlkRwCo rwco = {
.blk = blk,
.offset = offset,
.iobuf = &qiov,
.flags = flags,
.ret = NOT_DONE,
};
blk_inc_in_flight(blk);
if (qemu_in_coroutine()) {
/* Fast-path if already in coroutine context */
co_entry(&rwco);
} else {
Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
bdrv_coroutine_enter(blk_bs(blk), co);
BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
}
blk_dec_in_flight(blk);
return rwco.ret;
}
int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int bytes, BdrvRequestFlags flags)
int64_t bytes, BdrvRequestFlags flags)
{
return blk_prw(blk, offset, NULL, bytes, blk_write_entry,
return blk_pwritev_part(blk, offset, bytes, NULL, 0,
flags | BDRV_REQ_ZERO_WRITE);
}
@ -1412,7 +1380,7 @@ BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
typedef struct BlkAioEmAIOCB {
BlockAIOCB common;
BlkRwCo rwco;
int bytes;
int64_t bytes;
bool has_returned;
} BlkAioEmAIOCB;
@ -1444,7 +1412,8 @@ static void blk_aio_complete_bh(void *opaque)
blk_aio_complete(acb);
}
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset,
int64_t bytes,
void *iobuf, CoroutineEntry co_entry,
BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
@ -1483,7 +1452,7 @@ static void blk_aio_read_entry(void *opaque)
QEMUIOVector *qiov = rwco->iobuf;
assert(qiov->size == acb->bytes);
rwco->ret = blk_do_preadv(rwco->blk, rwco->offset, acb->bytes,
rwco->ret = blk_co_do_preadv(rwco->blk, rwco->offset, acb->bytes,
qiov, rwco->flags);
blk_aio_complete(acb);
}
@ -1495,37 +1464,40 @@ static void blk_aio_write_entry(void *opaque)
QEMUIOVector *qiov = rwco->iobuf;
assert(!qiov || qiov->size == acb->bytes);
rwco->ret = blk_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes,
rwco->ret = blk_co_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes,
qiov, 0, rwco->flags);
blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int count, BdrvRequestFlags flags,
int64_t bytes, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry,
return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_write_entry,
flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int bytes)
{
int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
if (ret < 0) {
return ret;
}
return count;
int ret;
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
blk_inc_in_flight(blk);
ret = blk_do_preadv(blk, offset, bytes, &qiov, 0);
blk_dec_in_flight(blk);
return ret < 0 ? ret : bytes;
}
int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int bytes,
BdrvRequestFlags flags)
{
int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
flags);
if (ret < 0) {
return ret;
}
return count;
int ret;
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
ret = blk_pwritev_part(blk, offset, bytes, &qiov, 0, flags);
return ret < 0 ? ret : bytes;
}
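Note the return convention both rewritten helpers keep: the full byte count on success, a negative errno on failure, never a short count. A usage sketch, given a BlockBackend *blk (error_report() is standard QEMU):

uint8_t buf[512];
int ret = blk_pread(blk, 0, buf, sizeof(buf));
if (ret < 0) {
    error_report("read failed: %s", strerror(-ret));
}
/* on success, ret == sizeof(buf) */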
int64_t blk_getlength(BlockBackend *blk)
@ -1559,6 +1531,7 @@ BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
assert((uint64_t)qiov->size <= INT64_MAX);
return blk_aio_prwv(blk, offset, qiov->size, qiov,
blk_aio_read_entry, flags, cb, opaque);
}
@ -1567,6 +1540,7 @@ BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
assert((uint64_t)qiov->size <= INT64_MAX);
return blk_aio_prwv(blk, offset, qiov->size, qiov,
blk_aio_write_entry, flags, cb, opaque);
}
@ -1582,8 +1556,8 @@ void blk_aio_cancel_async(BlockAIOCB *acb)
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn
blk_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
int coroutine_fn
blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
blk_wait_while_drained(blk);
@ -1594,18 +1568,15 @@ blk_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
return bdrv_co_ioctl(blk_bs(blk), req, buf);
}
static void blk_ioctl_entry(void *opaque)
{
BlkRwCo *rwco = opaque;
QEMUIOVector *qiov = rwco->iobuf;
rwco->ret = blk_do_ioctl(rwco->blk, rwco->offset, qiov->iov[0].iov_base);
aio_wait_kick();
}
int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
int ret;
blk_inc_in_flight(blk);
ret = blk_do_ioctl(blk, req, buf);
blk_dec_in_flight(blk);
return ret;
}
static void blk_aio_ioctl_entry(void *opaque)
@ -1613,7 +1584,7 @@ static void blk_aio_ioctl_entry(void *opaque)
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
rwco->ret = blk_do_ioctl(rwco->blk, rwco->offset, rwco->iobuf);
rwco->ret = blk_co_do_ioctl(rwco->blk, rwco->offset, rwco->iobuf);
blk_aio_complete(acb);
}
@ -1625,8 +1596,8 @@ BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn
blk_do_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
int coroutine_fn
blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes)
{
int ret;
@ -1645,19 +1616,31 @@ static void blk_aio_pdiscard_entry(void *opaque)
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
rwco->ret = blk_do_pdiscard(rwco->blk, rwco->offset, acb->bytes);
rwco->ret = blk_co_do_pdiscard(rwco->blk, rwco->offset, acb->bytes);
blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
int64_t offset, int bytes,
int64_t offset, int64_t bytes,
BlockCompletionFunc *cb, void *opaque)
{
return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
cb, opaque);
}
int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset,
int64_t bytes)
{
int ret;
blk_inc_in_flight(blk);
ret = blk_co_do_pdiscard(blk, offset, bytes);
blk_dec_in_flight(blk);
return ret;
}
int blk_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes)
{
int ret;
@ -1668,22 +1651,8 @@ int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
return ret;
}
static void blk_pdiscard_entry(void *opaque)
{
BlkRwCo *rwco = opaque;
QEMUIOVector *qiov = rwco->iobuf;
rwco->ret = blk_do_pdiscard(rwco->blk, rwco->offset, qiov->size);
aio_wait_kick();
}
int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn blk_do_flush(BlockBackend *blk)
int coroutine_fn blk_co_do_flush(BlockBackend *blk)
{
blk_wait_while_drained(blk);
@ -1699,7 +1668,7 @@ static void blk_aio_flush_entry(void *opaque)
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
rwco->ret = blk_do_flush(rwco->blk);
rwco->ret = blk_co_do_flush(rwco->blk);
blk_aio_complete(acb);
}
@ -1714,22 +1683,21 @@ int coroutine_fn blk_co_flush(BlockBackend *blk)
int ret;
blk_inc_in_flight(blk);
ret = blk_do_flush(blk);
ret = blk_co_do_flush(blk);
blk_dec_in_flight(blk);
return ret;
}
static void blk_flush_entry(void *opaque)
{
BlkRwCo *rwco = opaque;
rwco->ret = blk_do_flush(rwco->blk);
aio_wait_kick();
}
int blk_flush(BlockBackend *blk)
{
return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
int ret;
blk_inc_in_flight(blk);
ret = blk_do_flush(blk);
blk_dec_in_flight(blk);
return ret;
}
void blk_drain(BlockBackend *blk)
@ -1986,6 +1954,12 @@ uint32_t blk_get_max_transfer(BlockBackend *blk)
return ROUND_DOWN(max, blk_get_request_alignment(blk));
}
int blk_get_max_hw_iov(BlockBackend *blk)
{
return MIN_NON_ZERO(blk->root->bs->bl.max_hw_iov,
blk->root->bs->bl.max_iov);
}
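
blk_get_max_hw_iov() combines the hardware and generic limits with MIN_NON_ZERO(), where 0 means "no limit". A stand-alone restatement of those semantics (a paraphrase of the macro, not a quote of it):

    #include <stdio.h>

    static long min_non_zero(long a, long b)
    {
        if (a == 0) return b;          /* 0 means "unlimited" */
        if (b == 0) return a;
        return a < b ? a : b;
    }

    int main(void)
    {
        printf("%ld\n", min_non_zero(0, 1024));    /* 1024: only one limit set */
        printf("%ld\n", min_non_zero(256, 1024));  /* 256: tighter limit wins */
        return 0;
    }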
int blk_get_max_iov(BlockBackend *blk)
{
return blk->root->bs->bl.max_iov;
@ -2208,16 +2182,17 @@ void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
}
int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int bytes, BdrvRequestFlags flags)
int64_t bytes, BdrvRequestFlags flags)
{
return blk_co_pwritev(blk, offset, bytes, NULL,
flags | BDRV_REQ_ZERO_WRITE);
}
int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
int count)
int64_t bytes)
{
return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
return blk_pwritev_part(blk, offset, bytes, &qiov, 0,
BDRV_REQ_WRITE_COMPRESSED);
}
@ -2446,7 +2421,7 @@ void blk_unregister_buf(BlockBackend *blk, void *host)
int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
BlockBackend *blk_out, int64_t off_out,
int bytes, BdrvRequestFlags read_flags,
int64_t bytes, BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
int r;

View File

@ -238,8 +238,8 @@ static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
}
static int coroutine_fn
bochs_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
bochs_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BDRVBochsState *s = bs->opaque;
uint64_t sector_num = offset >> BDRV_SECTOR_BITS;

View File

@ -245,8 +245,8 @@ static inline int cloop_read_block(BlockDriverState *bs, int block_num)
}
static int coroutine_fn
cloop_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
cloop_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BDRVCloopState *s = bs->opaque;
uint64_t sector_num = offset >> BDRV_SECTOR_BITS;

View File

@ -207,7 +207,7 @@ static const BlockJobDriver commit_job_driver = {
};
static int coroutine_fn bdrv_commit_top_preadv(BlockDriverState *bs,
uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

View File

@ -40,8 +40,8 @@ typedef struct BDRVCopyBeforeWriteState {
} BDRVCopyBeforeWriteState;
static coroutine_fn int cbw_co_preadv(
BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
}
@ -64,7 +64,7 @@ static coroutine_fn int cbw_do_copy_before_write(BlockDriverState *bs,
}
static int coroutine_fn cbw_co_pdiscard(BlockDriverState *bs,
int64_t offset, int bytes)
int64_t offset, int64_t bytes)
{
int ret = cbw_do_copy_before_write(bs, offset, bytes, 0);
if (ret < 0) {
@ -75,7 +75,7 @@ static int coroutine_fn cbw_co_pdiscard(BlockDriverState *bs,
}
static int coroutine_fn cbw_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes, BdrvRequestFlags flags)
int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
int ret = cbw_do_copy_before_write(bs, offset, bytes, flags);
if (ret < 0) {
@ -86,9 +86,10 @@ static int coroutine_fn cbw_co_pwrite_zeroes(BlockDriverState *bs,
}
static coroutine_fn int cbw_co_pwritev(BlockDriverState *bs,
uint64_t offset,
uint64_t bytes,
QEMUIOVector *qiov, int flags)
int64_t offset,
int64_t bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
int ret = cbw_do_copy_before_write(bs, offset, bytes, flags);
if (ret < 0) {

View File

@ -128,10 +128,10 @@ static int64_t cor_getlength(BlockDriverState *bs)
static int coroutine_fn cor_co_preadv_part(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
int64_t offset, int64_t bytes,
QEMUIOVector *qiov,
size_t qiov_offset,
int flags)
BdrvRequestFlags flags)
{
int64_t n;
int local_flags;
@ -181,10 +181,11 @@ static int coroutine_fn cor_co_preadv_part(BlockDriverState *bs,
static int coroutine_fn cor_co_pwritev_part(BlockDriverState *bs,
uint64_t offset,
uint64_t bytes,
int64_t offset,
int64_t bytes,
QEMUIOVector *qiov,
size_t qiov_offset, int flags)
size_t qiov_offset,
BdrvRequestFlags flags)
{
return bdrv_co_pwritev_part(bs->file, offset, bytes, qiov, qiov_offset,
flags);
@ -192,7 +193,7 @@ static int coroutine_fn cor_co_pwritev_part(BlockDriverState *bs,
static int coroutine_fn cor_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes,
int64_t offset, int64_t bytes,
BdrvRequestFlags flags)
{
return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags);
@ -200,15 +201,15 @@ static int coroutine_fn cor_co_pwrite_zeroes(BlockDriverState *bs,
static int coroutine_fn cor_co_pdiscard(BlockDriverState *bs,
int64_t offset, int bytes)
int64_t offset, int64_t bytes)
{
return bdrv_co_pdiscard(bs->file, offset, bytes);
}
static int coroutine_fn cor_co_pwritev_compressed(BlockDriverState *bs,
uint64_t offset,
uint64_t bytes,
int64_t offset,
int64_t bytes,
QEMUIOVector *qiov)
{
return bdrv_co_pwritev(bs->file, offset, bytes, qiov,

View File

@ -27,6 +27,9 @@
#include "block/block_int.h"
/* For blk_bs() in generated block/block-gen.c */
#include "sysemu/block-backend.h"
int coroutine_fn bdrv_co_check(BlockDriverState *bs,
BdrvCheckResult *res, BdrvCheckMode fix);
int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp);
@ -72,4 +75,34 @@ int coroutine_fn
nbd_co_do_establish_connection(BlockDriverState *bs, Error **errp);
int generated_co_wrapper
blk_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags);
int coroutine_fn
blk_co_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags);
int generated_co_wrapper
blk_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags);
int coroutine_fn
blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags);
int generated_co_wrapper
blk_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
int coroutine_fn
blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
int generated_co_wrapper
blk_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes);
int coroutine_fn
blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes);
int generated_co_wrapper blk_do_flush(BlockBackend *blk);
int coroutine_fn blk_co_do_flush(BlockBackend *blk);
#endif /* BLOCK_COROUTINES_INT_H */
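
Each generated_co_wrapper declaration above pairs with a blk_co_do_*() coroutine function; scripts/block-coroutine-wrapper.py emits the non-coroutine entry point into block/block-gen.c. Heavily simplified, the emitted wrapper behaves like this sketch (illustrative names; the real code spawns a coroutine and polls the AioContext until it finishes):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
        int ret;
        bool done;
    } WrapperState;

    static int co_do_flush(void) { return 0; }    /* the blk_co_do_*() body */

    static void wrapper_entry(WrapperState *s)
    {
        s->ret = co_do_flush();
        s->done = true;                            /* lets the poll loop exit */
    }

    static int do_flush(void)                      /* the generated wrapper */
    {
        WrapperState s = { .done = false };
        wrapper_entry(&s);      /* real code: qemu_coroutine_create() + enter */
        while (!s.done) {
            /* real code: AIO_WAIT_WHILE(..., !s.done) drives the event loop */
        }
        return s.ret;
    }

    int main(void)
    {
        printf("%d\n", do_flush());                /* prints 0 */
        return 0;
    }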

View File

@ -397,8 +397,8 @@ static int block_crypto_reopen_prepare(BDRVReopenState *state,
#define BLOCK_CRYPTO_MAX_IO_SIZE (1024 * 1024)
static coroutine_fn int
block_crypto_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
block_crypto_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BlockCrypto *crypto = bs->opaque;
uint64_t cur_bytes; /* number of bytes in current iteration */
@ -460,8 +460,8 @@ block_crypto_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
static coroutine_fn int
block_crypto_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
block_crypto_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BlockCrypto *crypto = bs->opaque;
uint64_t cur_bytes; /* number of bytes in current iteration */

View File

@ -896,7 +896,8 @@ out:
}
static int coroutine_fn curl_co_preadv(BlockDriverState *bs,
uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
int64_t offset, int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
CURLAIOCB acb = {
.co = qemu_coroutine_self(),

View File

@ -689,8 +689,8 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
}
static int coroutine_fn
dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
dmg_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BDRVDMGState *s = bs->opaque;
uint64_t sector_num = offset >> BDRV_SECTOR_BITS;

View File

@ -31,6 +31,10 @@
#include <fuse.h>
#include <fuse_lowlevel.h>
#if defined(CONFIG_FALLOCATE_ZERO_RANGE)
#include <linux/falloc.h>
#endif
#ifdef __linux__
#include <linux/fs.h>
#endif

View File

@ -150,6 +150,8 @@ typedef struct BDRVRawState {
uint64_t locked_perm;
uint64_t locked_shared_perm;
uint64_t aio_max_batch;
int perm_change_fd;
int perm_change_flags;
BDRVReopenState *reopen_state;
@ -530,6 +532,11 @@ static QemuOptsList raw_runtime_opts = {
.type = QEMU_OPT_STRING,
.help = "host AIO implementation (threads, native, io_uring)",
},
{
.name = "aio-max-batch",
.type = QEMU_OPT_NUMBER,
.help = "AIO max batch size (0 = auto handled by AIO backend, default: 0)",
},
{
.name = "locking",
.type = QEMU_OPT_STRING,
@ -609,6 +616,8 @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
s->use_linux_io_uring = (aio == BLOCKDEV_AIO_OPTIONS_IO_URING);
#endif
s->aio_max_batch = qemu_opt_get_number(opts, "aio-max-batch", 0);
locking = qapi_enum_parse(&OnOffAuto_lookup,
qemu_opt_get(opts, "locking"),
ON_OFF_AUTO_AUTO, &local_err);
@ -1273,7 +1282,7 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
ret = hdev_get_max_segments(s->fd, &st);
if (ret > 0) {
bs->bl.max_iov = ret;
bs->bl.max_hw_iov = ret;
}
}
}
@ -1807,7 +1816,7 @@ static int handle_aiocb_copy_range(void *opaque)
static int handle_aiocb_discard(void *opaque)
{
RawPosixAIOData *aiocb = opaque;
int ret = -EOPNOTSUPP;
int ret = -ENOTSUP;
BDRVRawState *s = aiocb->bs->opaque;
if (!s->has_discard) {
@ -1829,7 +1838,7 @@ static int handle_aiocb_discard(void *opaque)
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
aiocb->aio_offset, aiocb->aio_nbytes);
ret = translate_err(-errno);
ret = translate_err(ret);
#elif defined(__APPLE__) && (__MACH__)
fpunchhole_t fpunchhole;
fpunchhole.fp_flags = 0;
@ -2057,7 +2066,8 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
} else if (s->use_linux_aio) {
LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
assert(qiov->size == bytes);
return laio_co_submit(bs, aio, s->fd, offset, qiov, type);
return laio_co_submit(bs, aio, s->fd, offset, qiov, type,
s->aio_max_batch);
#endif
}
@ -2077,16 +2087,16 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
return raw_thread_pool_submit(bs, handle_aiocb_rw, &acb);
}
static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov,
int flags)
static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_READ);
}
static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov,
int flags)
static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
assert(flags == 0);
return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_WRITE);
@ -2115,7 +2125,7 @@ static void raw_aio_unplug(BlockDriverState *bs)
#ifdef CONFIG_LINUX_AIO
if (s->use_linux_aio) {
LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
laio_io_unplug(bs, aio);
laio_io_unplug(bs, aio, s->aio_max_batch);
}
#endif
#ifdef CONFIG_LINUX_IO_URING
@ -2942,7 +2952,8 @@ static void raw_account_discard(BDRVRawState *s, uint64_t nbytes, int ret)
}
static coroutine_fn int
raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int bytes, bool blkdev)
raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes,
bool blkdev)
{
BDRVRawState *s = bs->opaque;
RawPosixAIOData acb;
@ -2966,13 +2977,13 @@ raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int bytes, bool blkdev)
}
static coroutine_fn int
raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
return raw_do_pdiscard(bs, offset, bytes, false);
}
static int coroutine_fn
raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes,
raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
BdrvRequestFlags flags, bool blkdev)
{
BDRVRawState *s = bs->opaque;
@ -3040,7 +3051,7 @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes,
static int coroutine_fn raw_co_pwrite_zeroes(
BlockDriverState *bs, int64_t offset,
int bytes, BdrvRequestFlags flags)
int64_t bytes, BdrvRequestFlags flags)
{
return raw_do_pwrite_zeroes(bs, offset, bytes, flags, false);
}
@ -3203,8 +3214,8 @@ static void raw_abort_perm_update(BlockDriverState *bs)
}
static int coroutine_fn raw_co_copy_range_from(
BlockDriverState *bs, BdrvChild *src, uint64_t src_offset,
BdrvChild *dst, uint64_t dst_offset, uint64_t bytes,
BlockDriverState *bs, BdrvChild *src, int64_t src_offset,
BdrvChild *dst, int64_t dst_offset, int64_t bytes,
BdrvRequestFlags read_flags, BdrvRequestFlags write_flags)
{
return bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
@ -3213,10 +3224,10 @@ static int coroutine_fn raw_co_copy_range_from(
static int coroutine_fn raw_co_copy_range_to(BlockDriverState *bs,
BdrvChild *src,
uint64_t src_offset,
int64_t src_offset,
BdrvChild *dst,
uint64_t dst_offset,
uint64_t bytes,
int64_t dst_offset,
int64_t bytes,
BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
@ -3591,7 +3602,7 @@ hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
#endif /* linux */
static coroutine_fn int
hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
BDRVRawState *s = bs->opaque;
int ret;
@ -3605,7 +3616,7 @@ hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
}
static coroutine_fn int hdev_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes, BdrvRequestFlags flags)
int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
int rc;

View File

@ -440,8 +440,8 @@ fail:
}
static BlockAIOCB *raw_aio_preadv(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags,
int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
BDRVRawState *s = bs->opaque;
@ -455,8 +455,8 @@ static BlockAIOCB *raw_aio_preadv(BlockDriverState *bs,
}
static BlockAIOCB *raw_aio_pwritev(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags,
int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
BDRVRawState *s = bs->opaque;

View File

@ -63,10 +63,10 @@ static int64_t compress_getlength(BlockDriverState *bs)
static int coroutine_fn compress_co_preadv_part(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
int64_t offset, int64_t bytes,
QEMUIOVector *qiov,
size_t qiov_offset,
int flags)
BdrvRequestFlags flags)
{
return bdrv_co_preadv_part(bs->file, offset, bytes, qiov, qiov_offset,
flags);
@ -74,10 +74,11 @@ static int coroutine_fn compress_co_preadv_part(BlockDriverState *bs,
static int coroutine_fn compress_co_pwritev_part(BlockDriverState *bs,
uint64_t offset,
uint64_t bytes,
int64_t offset,
int64_t bytes,
QEMUIOVector *qiov,
size_t qiov_offset, int flags)
size_t qiov_offset,
BdrvRequestFlags flags)
{
return bdrv_co_pwritev_part(bs->file, offset, bytes, qiov, qiov_offset,
flags | BDRV_REQ_WRITE_COMPRESSED);
@ -85,7 +86,7 @@ static int coroutine_fn compress_co_pwritev_part(BlockDriverState *bs,
static int coroutine_fn compress_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes,
int64_t offset, int64_t bytes,
BdrvRequestFlags flags)
{
return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags);
@ -93,7 +94,7 @@ static int coroutine_fn compress_co_pwrite_zeroes(BlockDriverState *bs,
static int coroutine_fn compress_co_pdiscard(BlockDriverState *bs,
int64_t offset, int bytes)
int64_t offset, int64_t bytes)
{
return bdrv_co_pdiscard(bs->file, offset, bytes);
}

View File

@ -891,6 +891,7 @@ out:
static void qemu_gluster_refresh_limits(BlockDriverState *bs, Error **errp)
{
bs->bl.max_transfer = GLUSTER_MAX_TRANSFER;
bs->bl.max_pdiscard = SIZE_MAX;
}
static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
@ -1003,19 +1004,19 @@ static void qemu_gluster_reopen_abort(BDRVReopenState *state)
#ifdef CONFIG_GLUSTERFS_ZEROFILL
static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset,
int size,
int64_t bytes,
BdrvRequestFlags flags)
{
int ret;
GlusterAIOCB acb;
BDRVGlusterState *s = bs->opaque;
acb.size = size;
acb.size = bytes;
acb.ret = 0;
acb.coroutine = qemu_coroutine_self();
acb.aio_context = bdrv_get_aio_context(bs);
ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
ret = glfs_zerofill_async(s->fd, offset, bytes, gluster_finish_aiocb, &acb);
if (ret < 0) {
return -errno;
}
@ -1297,18 +1298,20 @@ error:
#ifdef CONFIG_GLUSTERFS_DISCARD
static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs,
int64_t offset, int size)
int64_t offset, int64_t bytes)
{
int ret;
GlusterAIOCB acb;
BDRVGlusterState *s = bs->opaque;
assert(bytes <= SIZE_MAX); /* rely on max_pdiscard */
acb.size = 0;
acb.ret = 0;
acb.coroutine = qemu_coroutine_self();
acb.aio_context = bdrv_get_aio_context(bs);
ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
ret = glfs_discard_async(s->fd, offset, bytes, gluster_finish_aiocb, &acb);
if (ret < 0) {
return -errno;
}
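
The SIZE_MAX pieces fit together: glfs_discard_async() takes a size_t length, so the driver now advertises max_pdiscard = SIZE_MAX, and the assert records that the generic layer's clamping makes the int64_t-to-size_t conversion lossless. A tiny stand-alone restatement of the invariant:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* The block layer never passes more than max_pdiscard down, so a
         * non-negative int64_t byte count always fits in size_t here. */
        int64_t bytes = 1 << 20;
        assert(bytes >= 0 && (uint64_t)bytes <= SIZE_MAX);
        return 0;
    }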

View File

@ -136,6 +136,7 @@ static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
dst->min_mem_alignment = MAX(dst->min_mem_alignment,
src->min_mem_alignment);
dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
}
typedef struct BdrvRefreshLimitsState {
@ -956,7 +957,7 @@ bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
return waited;
}
static int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset,
Error **errp)
{
@ -1230,7 +1231,8 @@ out:
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
int64_t offset, int64_t bytes,
QEMUIOVector *qiov,
size_t qiov_offset, int flags)
size_t qiov_offset,
BdrvRequestFlags flags)
{
BlockDriver *drv = bs->drv;
int64_t sector_num;
@ -1868,7 +1870,8 @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
int head = 0;
int tail = 0;
int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
INT64_MAX);
int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
bs->bl.request_alignment);
int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
@ -2073,7 +2076,8 @@ bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
*/
static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
int64_t align, QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags)
{
BlockDriverState *bs = child->bs;
BlockDriver *drv = bs->drv;
@ -2246,7 +2250,11 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
return -ENOMEDIUM;
}
if (flags & BDRV_REQ_ZERO_WRITE) {
ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
} else {
ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
}
if (ret < 0) {
return ret;
}
@ -2810,7 +2818,12 @@ bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
BlockDriver *drv = bs->drv;
BlockDriverState *child_bs = bdrv_primary_bs(bs);
int ret = -ENOTSUP;
int ret;
ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
if (ret < 0) {
return ret;
}
if (!drv) {
return -ENOMEDIUM;
@ -2822,6 +2835,8 @@ bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
ret = drv->bdrv_load_vmstate(bs, qiov, pos);
} else if (child_bs) {
ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
} else {
ret = -ENOTSUP;
}
bdrv_dec_in_flight(bs);
@ -2834,7 +2849,12 @@ bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
BlockDriver *drv = bs->drv;
BlockDriverState *child_bs = bdrv_primary_bs(bs);
int ret = -ENOTSUP;
int ret;
ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
if (ret < 0) {
return ret;
}
if (!drv) {
return -ENOMEDIUM;
@ -2846,6 +2866,8 @@ bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
ret = drv->bdrv_save_vmstate(bs, qiov, pos);
} else if (child_bs) {
ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
} else {
ret = -ENOTSUP;
}
bdrv_dec_in_flight(bs);
@ -3035,7 +3057,8 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
int64_t bytes)
{
BdrvTrackedRequest req;
int max_pdiscard, ret;
int ret;
int64_t max_pdiscard;
int head, tail, align;
BlockDriverState *bs = child->bs;
@ -3082,7 +3105,7 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
goto out;
}
max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
align);
assert(max_pdiscard >= bs->bl.request_alignment);

View File

@ -427,14 +427,14 @@ static int64_t sector_qemu2lun(int64_t sector, IscsiLun *iscsilun)
return sector * BDRV_SECTOR_SIZE / iscsilun->block_size;
}
static bool is_byte_request_lun_aligned(int64_t offset, int count,
static bool is_byte_request_lun_aligned(int64_t offset, int64_t bytes,
IscsiLun *iscsilun)
{
if (offset % iscsilun->block_size || count % iscsilun->block_size) {
if (offset % iscsilun->block_size || bytes % iscsilun->block_size) {
error_report("iSCSI misaligned request: "
"iscsilun->block_size %u, offset %" PRIi64
", count %d",
iscsilun->block_size, offset, count);
", bytes %" PRIi64,
iscsilun->block_size, offset, bytes);
return false;
}
return true;
@ -1138,7 +1138,8 @@ iscsi_getlength(BlockDriverState *bs)
}
static int
coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset,
int64_t bytes)
{
IscsiLun *iscsilun = bs->opaque;
struct IscsiTask iTask;
@ -1154,6 +1155,12 @@ coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
return 0;
}
/*
* We don't want to overflow list.num, which is uint32_t.
* We rely on our max_pdiscard.
*/
assert(bytes / iscsilun->block_size <= UINT32_MAX);
list.lba = offset / iscsilun->block_size;
list.num = bytes / iscsilun->block_size;
@ -1202,12 +1209,12 @@ out_unlock:
static int
coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
int bytes, BdrvRequestFlags flags)
int64_t bytes, BdrvRequestFlags flags)
{
IscsiLun *iscsilun = bs->opaque;
struct IscsiTask iTask;
uint64_t lba;
uint32_t nb_blocks;
uint64_t nb_blocks;
bool use_16_for_ws = iscsilun->use_16_for_rw;
int r = 0;
@ -1247,11 +1254,21 @@ coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
if (use_16_for_ws) {
/*
* iscsi_writesame16_task's num_blocks argument is uint32_t. We rely here
* on our max_pwrite_zeroes limit.
*/
assert(nb_blocks <= UINT32_MAX);
iTask.task = iscsi_writesame16_task(iscsilun->iscsi, iscsilun->lun, lba,
iscsilun->zeroblock, iscsilun->block_size,
nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
0, 0, iscsi_co_generic_cb, &iTask);
} else {
/*
* iscsi_writesame10_task's num_blocks argument is uint16_t. We rely here
* on our max_pwrite_zeroes limit.
*/
assert(nb_blocks <= UINT16_MAX);
iTask.task = iscsi_writesame10_task(iscsilun->iscsi, iscsilun->lun, lba,
iscsilun->zeroblock, iscsilun->block_size,
nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
@ -2061,20 +2078,19 @@ static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp)
}
if (iscsilun->lbp.lbpu) {
if (iscsilun->bl.max_unmap < 0xffffffff / block_size) {
bs->bl.max_pdiscard =
iscsilun->bl.max_unmap * iscsilun->block_size;
}
MIN_NON_ZERO(iscsilun->bl.max_unmap * iscsilun->block_size,
(uint64_t)UINT32_MAX * iscsilun->block_size);
bs->bl.pdiscard_alignment =
iscsilun->bl.opt_unmap_gran * iscsilun->block_size;
} else {
bs->bl.pdiscard_alignment = iscsilun->block_size;
}
if (iscsilun->bl.max_ws_len < 0xffffffff / block_size) {
bs->bl.max_pwrite_zeroes =
iscsilun->bl.max_ws_len * iscsilun->block_size;
}
MIN_NON_ZERO(iscsilun->bl.max_ws_len * iscsilun->block_size,
max_xfer_len * iscsilun->block_size);
if (iscsilun->lbp.lbpws) {
bs->bl.pwrite_zeroes_alignment =
iscsilun->bl.opt_unmap_gran * iscsilun->block_size;
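
Both clamps exist so the asserts earlier in the file hold: bytes / block_size must fit the 32-bit UNMAP descriptor count, and the WRITE SAME block counts must fit uint32_t and uint16_t respectively. A stand-alone check of the discard side, with the block size assumed:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t block_size = 512;                                 /* assumed */
        uint64_t max_pdiscard = (uint64_t)UINT32_MAX * block_size; /* the clamp */
        uint64_t bytes = max_pdiscard;    /* worst case the block layer allows */
        assert(bytes / block_size <= UINT32_MAX); /* list.num stays in range */
        return 0;
    }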
@ -2169,10 +2185,10 @@ static void coroutine_fn iscsi_co_invalidate_cache(BlockDriverState *bs,
static int coroutine_fn iscsi_co_copy_range_from(BlockDriverState *bs,
BdrvChild *src,
uint64_t src_offset,
int64_t src_offset,
BdrvChild *dst,
uint64_t dst_offset,
uint64_t bytes,
int64_t dst_offset,
int64_t bytes,
BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
@ -2310,10 +2326,10 @@ static void iscsi_xcopy_data(struct iscsi_data *data,
static int coroutine_fn iscsi_co_copy_range_to(BlockDriverState *bs,
BdrvChild *src,
uint64_t src_offset,
int64_t src_offset,
BdrvChild *dst,
uint64_t dst_offset,
uint64_t bytes,
int64_t dst_offset,
int64_t bytes,
BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{

View File

@ -334,30 +334,45 @@ static void ioq_submit(LinuxAioState *s)
}
}
static uint64_t laio_max_batch(LinuxAioState *s, uint64_t dev_max_batch)
{
uint64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH;
/*
* An AIO context can be shared between multiple block devices, so
* `dev_max_batch` allows reducing the batch size for latency-sensitive
* devices.
*/
max_batch = MIN_NON_ZERO(dev_max_batch, max_batch);
/* limit the batch with the number of available events */
max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch);
return max_batch;
}
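
A quick numeric walk through laio_max_batch(); the DEFAULT_MAX_BATCH and MAX_EVENTS values here are assumptions chosen for the example:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t min_non_zero(uint64_t a, uint64_t b)
    {
        return a == 0 ? b : (b == 0 ? a : (a < b ? a : b));
    }

    int main(void)
    {
        uint64_t ctx_batch = 0;    /* AioContext aio_max_batch left at default */
        uint64_t def = 32;         /* DEFAULT_MAX_BATCH (assumed value) */
        uint64_t dev_max = 8;      /* per-device aio-max-batch=8 */
        uint64_t in_flight = 4, max_events = 128;    /* MAX_EVENTS (assumed) */

        uint64_t max = ctx_batch ? ctx_batch : def;       /* 32 */
        max = min_non_zero(dev_max, max);                 /* 8: device wins */
        max = min_non_zero(max_events - in_flight, max);  /* still 8 */
        printf("submit once %llu requests are queued\n",
               (unsigned long long)max);
        return 0;
    }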
void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
{
s->io_q.plugged++;
}
void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s)
void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s,
uint64_t dev_max_batch)
{
assert(s->io_q.plugged);
if (--s->io_q.plugged == 0 &&
!s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch) ||
(--s->io_q.plugged == 0 &&
!s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending))) {
ioq_submit(s);
}
}
static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
int type)
int type, uint64_t dev_max_batch)
{
LinuxAioState *s = laiocb->ctx;
struct iocb *iocbs = &laiocb->iocb;
QEMUIOVector *qiov = laiocb->qiov;
int64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH;
/* limit the batch with the number of available events */
max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch);
switch (type) {
case QEMU_AIO_WRITE:
@ -378,7 +393,7 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
s->io_q.in_queue++;
if (!s->io_q.blocked &&
(!s->io_q.plugged ||
s->io_q.in_queue >= max_batch)) {
s->io_q.in_queue >= laio_max_batch(s, dev_max_batch))) {
ioq_submit(s);
}
@ -386,7 +401,8 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
}
int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
uint64_t offset, QEMUIOVector *qiov, int type)
uint64_t offset, QEMUIOVector *qiov, int type,
uint64_t dev_max_batch)
{
int ret;
struct qemu_laiocb laiocb = {
@ -398,7 +414,7 @@ int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
.qiov = qiov,
};
ret = laio_do_submit(fd, &laiocb, offset, type);
ret = laio_do_submit(fd, &laiocb, offset, type, dev_max_batch);
if (ret < 0) {
return ret;
}

View File

@ -65,7 +65,7 @@ block_ss.add(when: 'CONFIG_POSIX', if_true: [files('file-posix.c'), coref, iokit
block_ss.add(when: libiscsi, if_true: files('iscsi-opts.c'))
block_ss.add(when: 'CONFIG_LINUX', if_true: files('nvme.c'))
block_ss.add(when: 'CONFIG_REPLICATION', if_true: files('replication.c'))
block_ss.add(when: ['CONFIG_LINUX_AIO', libaio], if_true: files('linux-aio.c'))
block_ss.add(when: libaio, if_true: files('linux-aio.c'))
block_ss.add(when: linux_io_uring, if_true: files('io_uring.c'))
block_modules = {}

View File

@ -56,7 +56,6 @@ typedef struct MirrorBlockJob {
bool zero_target;
MirrorCopyMode copy_mode;
BlockdevOnError on_source_error, on_target_error;
bool synced;
/* Set when the target is synced (dirty bitmap is clean, nothing
* in flight) and the job is running in active mode */
bool actively_synced;
@ -121,7 +120,6 @@ typedef enum MirrorMethod {
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
int error)
{
s->synced = false;
s->actively_synced = false;
if (read) {
return block_job_error_action(&s->common, s->on_source_error,
@ -944,12 +942,10 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
if (s->bdev_length == 0) {
/* Transition to the READY state and wait for complete. */
job_transition_to_ready(&s->common.job);
s->synced = true;
s->actively_synced = true;
while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
job_yield(&s->common.job);
}
s->common.job.cancelled = false;
goto immediate_exit;
}
@ -1010,6 +1006,11 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
job_pause_point(&s->common.job);
if (job_is_cancelled(&s->common.job)) {
ret = 0;
goto immediate_exit;
}
cnt = bdrv_get_dirty_count(s->dirty_bitmap);
/* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
* the number of bytes currently being processed; together those are
@ -1036,7 +1037,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
should_complete = false;
if (s->in_flight == 0 && cnt == 0) {
trace_mirror_before_flush(s);
if (!s->synced) {
if (!job_is_ready(&s->common.job)) {
if (mirror_flush(s) < 0) {
/* Go check s->ret. */
continue;
@ -1047,14 +1048,13 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
* the target in a consistent state.
*/
job_transition_to_ready(&s->common.job);
s->synced = true;
if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
s->actively_synced = true;
}
}
should_complete = s->should_complete ||
job_is_cancelled(&s->common.job);
job_cancel_requested(&s->common.job);
cnt = bdrv_get_dirty_count(s->dirty_bitmap);
}
@ -1084,24 +1084,17 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
* completion.
*/
assert(QLIST_EMPTY(&bs->tracked_requests));
s->common.job.cancelled = false;
need_drain = false;
break;
}
ret = 0;
if (s->synced && !should_complete) {
if (job_is_ready(&s->common.job) && !should_complete) {
delay_ns = (s->in_flight == 0 &&
cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
}
trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job),
delay_ns);
job_sleep_ns(&s->common.job, delay_ns);
if (job_is_cancelled(&s->common.job) &&
(!s->synced || s->common.job.force_cancel))
{
break;
}
s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
}
@ -1111,8 +1104,7 @@ immediate_exit:
* or it was cancelled prematurely so that we do not guarantee that
* the target is a copy of the source.
*/
assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
job_is_cancelled(&s->common.job)));
assert(ret < 0 || job_is_cancelled(&s->common.job));
assert(need_drain);
mirror_wait_for_all_io(s);
}
@ -1135,7 +1127,7 @@ static void mirror_complete(Job *job, Error **errp)
{
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
if (!s->synced) {
if (!job_is_ready(job)) {
error_setg(errp, "The active block job '%s' cannot be completed",
job->id);
return;
@ -1190,21 +1182,34 @@ static bool mirror_drained_poll(BlockJob *job)
* from one of our own drain sections, to avoid a deadlock waiting for
* ourselves.
*/
if (!s->common.job.paused && !s->common.job.cancelled && !s->in_drain) {
if (!s->common.job.paused && !job_is_cancelled(&job->job) && !s->in_drain) {
return true;
}
return !!s->in_flight;
}
static void mirror_cancel(Job *job, bool force)
static bool mirror_cancel(Job *job, bool force)
{
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
BlockDriverState *target = blk_bs(s->target);
if (force || !job_is_ready(job)) {
/*
* Before the job is READY, we treat any cancellation like a
* force-cancellation.
*/
force = force || !job_is_ready(job);
if (force) {
bdrv_cancel_in_flight(target);
}
return force;
}
static bool commit_active_cancel(Job *job, bool force)
{
/* Same as above in mirror_cancel() */
return force || !job_is_ready(job);
}
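
mirror_cancel() and commit_active_cancel() now report back whether the cancellation should count as forced: any cancel issued before the job reaches READY is upgraded to a force-cancel. The decision reduces to a small truth table:

    #include <stdbool.h>
    #include <stdio.h>

    /* Before READY, every cancel acts like a force-cancel. */
    static bool effective_force(bool force, bool ready)
    {
        return force || !ready;
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               effective_force(false, false),   /* 1: soft cancel, not ready */
               effective_force(false, true),    /* 0: soft cancel after READY */
               effective_force(true, false),    /* 1 */
               effective_force(true, true));    /* 1 */
        return 0;
    }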
static const BlockJobDriver mirror_job_driver = {
@ -1234,6 +1239,7 @@ static const BlockJobDriver commit_active_job_driver = {
.abort = mirror_abort,
.pause = mirror_pause,
.complete = mirror_complete,
.cancel = commit_active_cancel,
},
.drained_poll = mirror_drained_poll,
};
@ -1402,7 +1408,7 @@ static void coroutine_fn active_write_settle(MirrorOp *op)
}
static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}
@ -1417,6 +1423,7 @@ static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs,
bool copy_to_target;
copy_to_target = s->job->ret >= 0 &&
!job_is_cancelled(&s->job->common.job) &&
s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;
if (copy_to_target) {
@ -1456,7 +1463,7 @@ out:
}
static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
MirrorBDSOpaque *s = bs->opaque;
QEMUIOVector bounce_qiov;
@ -1465,6 +1472,7 @@ static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
bool copy_to_target;
copy_to_target = s->job->ret >= 0 &&
!job_is_cancelled(&s->job->common.job) &&
s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;
if (copy_to_target) {
@ -1501,14 +1509,14 @@ static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
}
static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes, BdrvRequestFlags flags)
int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
flags);
}
static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
int64_t offset, int bytes)
int64_t offset, int64_t bytes)
{
return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
NULL, 0);

View File

@ -57,7 +57,8 @@
typedef struct {
Coroutine *coroutine;
uint64_t offset; /* original offset of the request */
bool receiving; /* waiting for connection_co? */
bool receiving; /* sleeping in the yield in nbd_receive_replies */
bool reply_possible; /* reply header not yet received */
} NBDClientRequest;
typedef enum NBDClientState {
@ -73,14 +74,10 @@ typedef struct BDRVNBDState {
CoMutex send_mutex;
CoQueue free_sema;
Coroutine *connection_co;
Coroutine *teardown_co;
QemuCoSleep reconnect_sleep;
bool drained;
bool wait_drained_end;
CoMutex receive_mutex;
int in_flight;
NBDClientState state;
bool wait_in_flight;
QEMUTimer *reconnect_delay_timer;
@ -127,33 +124,44 @@ static bool nbd_client_connected(BDRVNBDState *s)
return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTED;
}
static bool nbd_recv_coroutine_wake_one(NBDClientRequest *req)
{
if (req->receiving) {
req->receiving = false;
aio_co_wake(req->coroutine);
return true;
}
return false;
}
static void nbd_recv_coroutines_wake(BDRVNBDState *s, bool all)
{
int i;
for (i = 0; i < MAX_NBD_REQUESTS; i++) {
if (nbd_recv_coroutine_wake_one(&s->requests[i]) && !all) {
return;
}
}
}
static void nbd_channel_error(BDRVNBDState *s, int ret)
{
if (nbd_client_connected(s)) {
qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
}
if (ret == -EIO) {
if (nbd_client_connected(s)) {
s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
NBD_CLIENT_CONNECTING_NOWAIT;
}
} else {
if (nbd_client_connected(s)) {
qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
}
s->state = NBD_CLIENT_QUIT;
}
}
static void nbd_recv_coroutines_wake_all(BDRVNBDState *s)
{
int i;
for (i = 0; i < MAX_NBD_REQUESTS; i++) {
NBDClientRequest *req = &s->requests[i];
if (req->coroutine && req->receiving) {
req->receiving = false;
aio_co_wake(req->coroutine);
}
}
nbd_recv_coroutines_wake(s, true);
}
static void reconnect_delay_timer_del(BDRVNBDState *s)
@ -170,6 +178,7 @@ static void reconnect_delay_timer_cb(void *opaque)
if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
s->state = NBD_CLIENT_CONNECTING_NOWAIT;
nbd_co_establish_connection_cancel(s->conn);
while (qemu_co_enter_next(&s->free_sema, NULL)) {
/* Resume all queued requests */
}
@ -192,113 +201,21 @@ static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
timer_mod(s->reconnect_delay_timer, expire_time_ns);
}
static void nbd_client_detach_aio_context(BlockDriverState *bs)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
/* Timer is deleted in nbd_client_co_drain_begin() */
assert(!s->reconnect_delay_timer);
/*
* If reconnect is in progress we may have no ->ioc. It will be
* re-instantiated in the proper aio context once the connection is
* reestablished.
*/
if (s->ioc) {
qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
}
}
static void nbd_client_attach_aio_context_bh(void *opaque)
{
BlockDriverState *bs = opaque;
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
if (s->connection_co) {
/*
* The node is still drained, so we know the coroutine has yielded in
* nbd_read_eof(), the only place where bs->in_flight can reach 0, or
* it is entered for the first time. Both places are safe for entering
* the coroutine.
*/
qemu_aio_coroutine_enter(bs->aio_context, s->connection_co);
}
bdrv_dec_in_flight(bs);
}
static void nbd_client_attach_aio_context(BlockDriverState *bs,
AioContext *new_context)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
/*
* s->connection_co is either yielded from nbd_receive_reply or from
* nbd_co_reconnect_loop()
*/
if (nbd_client_connected(s)) {
qio_channel_attach_aio_context(QIO_CHANNEL(s->ioc), new_context);
}
bdrv_inc_in_flight(bs);
/*
* Need to wait here for the BH to run because the BH must run while the
* node is still drained.
*/
aio_wait_bh_oneshot(new_context, nbd_client_attach_aio_context_bh, bs);
}
static void coroutine_fn nbd_client_co_drain_begin(BlockDriverState *bs)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
s->drained = true;
qemu_co_sleep_wake(&s->reconnect_sleep);
nbd_co_establish_connection_cancel(s->conn);
reconnect_delay_timer_del(s);
if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
s->state = NBD_CLIENT_CONNECTING_NOWAIT;
qemu_co_queue_restart_all(&s->free_sema);
}
}
static void coroutine_fn nbd_client_co_drain_end(BlockDriverState *bs)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
s->drained = false;
if (s->wait_drained_end) {
s->wait_drained_end = false;
aio_co_wake(s->connection_co);
}
}
static void nbd_teardown_connection(BlockDriverState *bs)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
assert(!s->in_flight);
if (s->ioc) {
/* finish any pending coroutines */
qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
nbd_yank, s->bs);
object_unref(OBJECT(s->ioc));
s->ioc = NULL;
}
s->state = NBD_CLIENT_QUIT;
if (s->connection_co) {
qemu_co_sleep_wake(&s->reconnect_sleep);
nbd_co_establish_connection_cancel(s->conn);
}
if (qemu_in_coroutine()) {
s->teardown_co = qemu_coroutine_self();
/* connection_co resumes us when it terminates */
qemu_coroutine_yield();
s->teardown_co = NULL;
} else {
BDRV_POLL_WHILE(bs, s->connection_co);
}
assert(!s->connection_co);
}
static bool nbd_client_connecting(BDRVNBDState *s)
@ -363,10 +280,11 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
int ret;
bool blocking = nbd_client_connecting_wait(s);
assert(!s->ioc);
s->ioc = nbd_co_establish_connection(s->conn, &s->info, true, errp);
s->ioc = nbd_co_establish_connection(s->conn, &s->info, blocking, errp);
if (!s->ioc) {
return -ECONNREFUSED;
}
@ -402,29 +320,22 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
return 0;
}
/* called under s->send_mutex */
static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s)
{
if (!nbd_client_connecting(s)) {
return;
}
assert(nbd_client_connecting(s));
assert(s->in_flight == 0);
/* Wait for completion of all in-flight requests */
qemu_co_mutex_lock(&s->send_mutex);
while (s->in_flight > 0) {
qemu_co_mutex_unlock(&s->send_mutex);
nbd_recv_coroutines_wake_all(s);
s->wait_in_flight = true;
qemu_coroutine_yield();
s->wait_in_flight = false;
qemu_co_mutex_lock(&s->send_mutex);
}
qemu_co_mutex_unlock(&s->send_mutex);
if (!nbd_client_connecting(s)) {
return;
if (nbd_client_connecting_wait(s) && s->reconnect_delay &&
!s->reconnect_delay_timer)
{
/*
* It's the first reconnect attempt after switching to
* NBD_CLIENT_CONNECTING_WAIT
*/
reconnect_delay_timer_init(s,
qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
s->reconnect_delay * NANOSECONDS_PER_SECOND);
}
/*
@ -444,135 +355,73 @@ static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s)
nbd_co_do_establish_connection(s->bs, NULL);
}
static coroutine_fn void nbd_co_reconnect_loop(BDRVNBDState *s)
static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
{
uint64_t timeout = 1 * NANOSECONDS_PER_SECOND;
uint64_t max_timeout = 16 * NANOSECONDS_PER_SECOND;
int ret;
uint64_t ind = HANDLE_TO_INDEX(s, handle), ind2;
QEMU_LOCK_GUARD(&s->receive_mutex);
if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
reconnect_delay_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
s->reconnect_delay * NANOSECONDS_PER_SECOND);
}
nbd_reconnect_attempt(s);
while (nbd_client_connecting(s)) {
if (s->drained) {
bdrv_dec_in_flight(s->bs);
s->wait_drained_end = true;
while (s->drained) {
/*
* We may be entered once from nbd_client_attach_aio_context_bh
* and then from nbd_client_co_drain_end. So here is a loop.
*/
qemu_coroutine_yield();
}
bdrv_inc_in_flight(s->bs);
} else {
qemu_co_sleep_ns_wakeable(&s->reconnect_sleep,
QEMU_CLOCK_REALTIME, timeout);
if (s->drained) {
continue;
}
if (timeout < max_timeout) {
timeout *= 2;
}
}
nbd_reconnect_attempt(s);
}
reconnect_delay_timer_del(s);
}
static coroutine_fn void nbd_connection_entry(void *opaque)
{
BDRVNBDState *s = opaque;
uint64_t i;
int ret = 0;
Error *local_err = NULL;
while (qatomic_load_acquire(&s->state) != NBD_CLIENT_QUIT) {
/*
* The NBD client can only really be considered idle when it has
* yielded from qio_channel_readv_all_eof(), waiting for data. This is
* the point where the additional scheduled coroutine entry happens
* after nbd_client_attach_aio_context().
*
* Therefore we keep an additional in_flight reference all the time and
* only drop it temporarily here.
*/
if (nbd_client_connecting(s)) {
nbd_co_reconnect_loop(s);
while (true) {
if (s->reply.handle == handle) {
/* We are done */
return 0;
}
if (!nbd_client_connected(s)) {
continue;
}
assert(s->reply.handle == 0);
ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, &local_err);
if (local_err) {
trace_nbd_read_reply_entry_fail(ret, error_get_pretty(local_err));
error_free(local_err);
local_err = NULL;
}
if (ret <= 0) {
nbd_channel_error(s, ret ? ret : -EIO);
continue;
return -EIO;
}
if (s->reply.handle != 0) {
/*
* There's no need for a mutex on the receive side, because the
* handler acts as a synchronization point and ensures that only
* one coroutine is called until the reply finishes.
* Some other request is being handled now. It should already have been
* woken by whoever set s->reply.handle (or it never waited in this
* yield). So we should not wake it here.
*/
i = HANDLE_TO_INDEX(s, s->reply.handle);
if (i >= MAX_NBD_REQUESTS ||
!s->requests[i].coroutine ||
!s->requests[i].receiving ||
(nbd_reply_is_structured(&s->reply) && !s->info.structured_reply))
{
nbd_channel_error(s, -EINVAL);
continue;
}
ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
assert(!s->requests[ind2].receiving);
s->requests[ind].receiving = true;
qemu_co_mutex_unlock(&s->receive_mutex);
/*
* We're woken up again by the request itself. Note that there
* is no race between yielding and reentering connection_co. This
* is because:
*
* - if the request runs on the same AioContext, it is only
* entered after we yield
*
* - if the request runs on a different AioContext, reentering
* connection_co happens through a bottom half, which can only
* run after we yield.
*/
s->requests[i].receiving = false;
aio_co_wake(s->requests[i].coroutine);
qemu_coroutine_yield();
/*
* We may be woken for 3 reasons:
* 1. From this function, executing in a parallel coroutine, when our
* handle is received.
* 2. From nbd_channel_error(), when connection is lost.
* 3. From nbd_co_receive_one_chunk(), when previous request is
* finished and s->reply.handle set to 0.
* Anyway, it's OK to lock the mutex and go to the next iteration.
*/
qemu_co_mutex_lock(&s->receive_mutex);
assert(!s->requests[ind].receiving);
continue;
}
qemu_co_queue_restart_all(&s->free_sema);
nbd_recv_coroutines_wake_all(s);
bdrv_dec_in_flight(s->bs);
s->connection_co = NULL;
if (s->ioc) {
qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
nbd_yank, s->bs);
object_unref(OBJECT(s->ioc));
s->ioc = NULL;
/* We are under the mutex and handle is 0. We have to do the dirty work. */
assert(s->reply.handle == 0);
ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, NULL);
if (ret <= 0) {
ret = ret ? ret : -EIO;
nbd_channel_error(s, ret);
return ret;
}
if (s->teardown_co) {
aio_co_wake(s->teardown_co);
if (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply) {
nbd_channel_error(s, -EINVAL);
return -EINVAL;
}
if (s->reply.handle == handle) {
/* We are done */
return 0;
}
ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].reply_possible) {
nbd_channel_error(s, -EINVAL);
return -EINVAL;
}
nbd_recv_coroutine_wake_one(&s->requests[ind2]);
}
aio_wait_kick();
}
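
The dispatch above keys everything on the 64-bit request handle. Reading HANDLE_TO_INDEX()/INDEX_TO_HANDLE() as an XOR-with-the-BDS-pointer scheme, the mapping is its own inverse; the sketch below encodes that reading as an assumption, not a quote of the macros:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t index_to_handle(uint64_t index, void *bs)
    {
        return index ^ (uint64_t)(uintptr_t)bs;   /* assumed encoding */
    }

    static uint64_t handle_to_index(uint64_t handle, void *bs)
    {
        return handle ^ (uint64_t)(uintptr_t)bs;  /* the same XOR undoes it */
    }

    int main(void)
    {
        int dummy;                                /* stands in for the BDS */
        uint64_t h = index_to_handle(3, &dummy);
        assert(handle_to_index(h, &dummy) == 3);  /* round-trips */
        return 0;
    }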
static int nbd_co_send_request(BlockDriverState *bs,
@ -583,10 +432,17 @@ static int nbd_co_send_request(BlockDriverState *bs,
int rc, i = -1;
qemu_co_mutex_lock(&s->send_mutex);
while (s->in_flight == MAX_NBD_REQUESTS || nbd_client_connecting_wait(s)) {
while (s->in_flight == MAX_NBD_REQUESTS ||
(!nbd_client_connected(s) && s->in_flight > 0))
{
qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
}
if (nbd_client_connecting(s)) {
nbd_reconnect_attempt(s);
}
if (!nbd_client_connected(s)) {
rc = -EIO;
goto err;
@ -606,6 +462,7 @@ static int nbd_co_send_request(BlockDriverState *bs,
s->requests[i].coroutine = qemu_coroutine_self();
s->requests[i].offset = request->from;
s->requests[i].receiving = false;
s->requests[i].reply_possible = true;
request->handle = INDEX_TO_HANDLE(s, i);
@ -633,10 +490,6 @@ err:
if (i != -1) {
s->requests[i].coroutine = NULL;
s->in_flight--;
}
if (s->in_flight == 0 && s->wait_in_flight) {
aio_co_wake(s->connection_co);
} else {
qemu_co_queue_next(&s->free_sema);
}
}
@ -935,10 +788,7 @@ static coroutine_fn int nbd_co_do_receive_one_chunk(
}
*request_ret = 0;
/* Wait until we're woken up by nbd_connection_entry. */
s->requests[i].receiving = true;
qemu_coroutine_yield();
assert(!s->requests[i].receiving);
nbd_receive_replies(s, handle);
if (!nbd_client_connected(s)) {
error_setg(errp, "Connection closed");
return -EIO;
@ -1031,14 +881,7 @@ static coroutine_fn int nbd_co_receive_one_chunk(
}
s->reply.handle = 0;
if (s->connection_co && !s->wait_in_flight) {
/*
* We must check s->wait_in_flight, because we may be entered by
* nbd_recv_coroutines_wake_all(); in this case we should not
* wake connection_co here, as it will be woken by the last request.
*/
aio_co_wake(s->connection_co);
}
nbd_recv_coroutines_wake(s, false);
return ret;
}
@ -1149,11 +992,7 @@ break_loop:
qemu_co_mutex_lock(&s->send_mutex);
s->in_flight--;
if (s->in_flight == 0 && s->wait_in_flight) {
aio_co_wake(s->connection_co);
} else {
qemu_co_queue_next(&s->free_sema);
}
qemu_co_mutex_unlock(&s->send_mutex);
return false;
@ -1322,8 +1161,9 @@ static int nbd_co_request(BlockDriverState *bs, NBDRequest *request,
return ret ? ret : request_ret;
}
static int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov, int flags)
static int nbd_client_co_preadv(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
int ret, request_ret;
Error *local_err = NULL;
@ -1380,8 +1220,9 @@ static int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
return ret ? ret : request_ret;
}
static int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov, int flags)
static int nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
NBDRequest request = {
@ -1405,15 +1246,17 @@ static int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
}
static int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
int bytes, BdrvRequestFlags flags)
int64_t bytes, BdrvRequestFlags flags)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
NBDRequest request = {
.type = NBD_CMD_WRITE_ZEROES,
.from = offset,
.len = bytes,
.len = bytes, /* .len is actually uint32_t */
};
assert(bytes <= UINT32_MAX); /* rely on max_pwrite_zeroes */
assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
return -ENOTSUP;
@ -1453,15 +1296,17 @@ static int nbd_client_co_flush(BlockDriverState *bs)
}
static int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset,
int bytes)
int64_t bytes)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
NBDRequest request = {
.type = NBD_CMD_TRIM,
.from = offset,
.len = bytes,
.len = bytes, /* len is uint32_t */
};
assert(bytes <= UINT32_MAX); /* rely on max_pdiscard */
assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
return 0;
@ -1969,6 +1814,7 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
s->bs = bs;
qemu_co_mutex_init(&s->send_mutex);
qemu_co_queue_init(&s->free_sema);
qemu_co_mutex_init(&s->receive_mutex);
if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) {
return -EEXIST;
@ -1983,14 +1829,13 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
s->x_dirty_bitmap, s->tlscreds);
/* TODO: Configurable retry-until-timeout behaviour. */
s->state = NBD_CLIENT_CONNECTING_WAIT;
ret = nbd_do_establish_connection(bs, errp);
if (ret < 0) {
goto fail;
}
s->connection_co = qemu_coroutine_create(nbd_connection_entry, s);
bdrv_inc_in_flight(bs);
aio_co_schedule(bdrv_get_aio_context(bs), s->connection_co);
nbd_client_connection_enable_retry(s->conn);
return 0;
@ -2144,6 +1989,8 @@ static void nbd_cancel_in_flight(BlockDriverState *bs)
s->state = NBD_CLIENT_CONNECTING_NOWAIT;
qemu_co_queue_restart_all(&s->free_sema);
}
nbd_co_establish_connection_cancel(s->conn);
}
static BlockDriver bdrv_nbd = {
@ -2164,10 +2011,6 @@ static BlockDriver bdrv_nbd = {
.bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_co_truncate = nbd_co_truncate,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_client_detach_aio_context,
.bdrv_attach_aio_context = nbd_client_attach_aio_context,
.bdrv_co_drain_begin = nbd_client_co_drain_begin,
.bdrv_co_drain_end = nbd_client_co_drain_end,
.bdrv_refresh_filename = nbd_refresh_filename,
.bdrv_co_block_status = nbd_client_co_block_status,
.bdrv_dirname = nbd_dirname,
@ -2193,10 +2036,6 @@ static BlockDriver bdrv_nbd_tcp = {
.bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_co_truncate = nbd_co_truncate,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_client_detach_aio_context,
.bdrv_attach_aio_context = nbd_client_attach_aio_context,
.bdrv_co_drain_begin = nbd_client_co_drain_begin,
.bdrv_co_drain_end = nbd_client_co_drain_end,
.bdrv_refresh_filename = nbd_refresh_filename,
.bdrv_co_block_status = nbd_client_co_block_status,
.bdrv_dirname = nbd_dirname,
@ -2222,10 +2061,6 @@ static BlockDriver bdrv_nbd_unix = {
.bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_co_truncate = nbd_co_truncate,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_client_detach_aio_context,
.bdrv_attach_aio_context = nbd_client_attach_aio_context,
.bdrv_co_drain_begin = nbd_client_co_drain_begin,
.bdrv_co_drain_end = nbd_client_co_drain_end,
.bdrv_refresh_filename = nbd_refresh_filename,
.bdrv_co_block_status = nbd_client_co_block_status,
.bdrv_dirname = nbd_dirname,

View File

@ -262,9 +262,9 @@ nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
nfs_co_generic_bh_cb, task);
}
static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *iov,
int flags)
static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *iov,
BdrvRequestFlags flags)
{
NFSClient *client = bs->opaque;
NFSRPC task;
@ -296,9 +296,9 @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset,
return 0;
}
static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *iov,
int flags)
static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *iov,
BdrvRequestFlags flags)
{
NFSClient *client = bs->opaque;
NFSRPC task;

View File

@ -116,8 +116,9 @@ static coroutine_fn int null_co_common(BlockDriverState *bs)
}
static coroutine_fn int null_co_preadv(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
int64_t offset, int64_t bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
BDRVNullState *s = bs->opaque;
@ -129,8 +130,9 @@ static coroutine_fn int null_co_preadv(BlockDriverState *bs,
}
static coroutine_fn int null_co_pwritev(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
int64_t offset, int64_t bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
return null_co_common(bs);
}
@ -187,8 +189,8 @@ static inline BlockAIOCB *null_aio_common(BlockDriverState *bs,
}
static BlockAIOCB *null_aio_preadv(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags,
int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb,
void *opaque)
{
@ -202,8 +204,8 @@ static BlockAIOCB *null_aio_preadv(BlockDriverState *bs,
}
static BlockAIOCB *null_aio_pwritev(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags,
int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb,
void *opaque)
{

View File

@ -183,15 +183,20 @@ static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
return r == 0;
}
static void nvme_free_queue(NVMeQueue *q)
{
qemu_vfree(q->queue);
}
static void nvme_free_queue_pair(NVMeQueuePair *q)
{
trace_nvme_free_queue_pair(q->index, q);
trace_nvme_free_queue_pair(q->index, q, &q->cq, &q->sq);
if (q->completion_bh) {
qemu_bh_delete(q->completion_bh);
}
nvme_free_queue(&q->sq);
nvme_free_queue(&q->cq);
qemu_vfree(q->prp_list_pages);
qemu_vfree(q->sq.queue);
qemu_vfree(q->cq.queue);
qemu_mutex_destroy(&q->lock);
g_free(q);
}
@ -514,10 +519,10 @@ static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
BDRVNVMeState *s = bs->opaque;
bool ret = false;
union {
QEMU_AUTO_VFREE union {
NvmeIdCtrl ctrl;
NvmeIdNs ns;
} *id;
} *id = NULL;
NvmeLBAF *lbaf;
uint16_t oncs;
int r;
@ -595,7 +600,6 @@ static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
s->blkshift = lbaf->ds;
out:
qemu_vfio_dma_unmap(s->vfio, id);
qemu_vfree(id);
return ret;
}
@ -1219,7 +1223,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
{
BDRVNVMeState *s = bs->opaque;
int r;
uint8_t *buf = NULL;
QEMU_AUTO_VFREE uint8_t *buf = NULL;
QEMUIOVector local_qiov;
size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size);
assert(QEMU_IS_ALIGNED(offset, s->page_size));
@ -1246,20 +1250,21 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
if (!r && !is_write) {
qemu_iovec_from_buf(qiov, 0, buf, bytes);
}
qemu_vfree(buf);
return r;
}
static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
int64_t offset, int64_t bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
}
static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
int64_t offset, int64_t bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}
@ -1294,19 +1299,29 @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset,
int bytes,
int64_t bytes,
BdrvRequestFlags flags)
{
BDRVNVMeState *s = bs->opaque;
NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
NVMeRequest *req;
uint32_t cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;
uint32_t cdw12;
if (!s->supports_write_zeroes) {
return -ENOTSUP;
}
if (bytes == 0) {
return 0;
}
cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;
/*
* We should not lose information. pwrite_zeroes_alignment and
* max_pwrite_zeroes guarantee it.
*/
assert(((cdw12 + 1) << s->blkshift) == bytes);
NvmeCmd cmd = {
.opcode = NVME_CMD_WRITE_ZEROES,
.nsid = cpu_to_le32(s->nsid),
@ -1348,12 +1363,12 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
int64_t offset,
int bytes)
int64_t bytes)
{
BDRVNVMeState *s = bs->opaque;
NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
NVMeRequest *req;
NvmeDsmRange *buf;
QEMU_AUTO_VFREE NvmeDsmRange *buf = NULL;
QEMUIOVector local_qiov;
int ret;
@ -1375,6 +1390,14 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
assert(s->queue_count > 1);
/*
* Filling the @buf requires @offset and @bytes to satisfy restrictions
* defined in nvme_refresh_limits().
*/
assert(QEMU_IS_ALIGNED(bytes, 1UL << s->blkshift));
assert(QEMU_IS_ALIGNED(offset, 1UL << s->blkshift));
assert((bytes >> s->blkshift) <= UINT32_MAX);
buf = qemu_try_memalign(s->page_size, s->page_size);
if (!buf) {
return -ENOMEM;
@ -1420,7 +1443,6 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
trace_nvme_dsm_done(s, offset, bytes, ret);
out:
qemu_iovec_destroy(&local_qiov);
qemu_vfree(buf);
return ret;
}
@ -1470,6 +1492,18 @@ static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
bs->bl.opt_mem_alignment = s->page_size;
bs->bl.request_alignment = s->page_size;
bs->bl.max_transfer = s->max_transfer;
/*
* Look at nvme_co_pwrite_zeroes: after shift and decrement we should get
* at most 0xFFFF.
*/
bs->bl.max_pwrite_zeroes = 1ULL << (s->blkshift + 16);
bs->bl.pwrite_zeroes_alignment = MAX(bs->bl.request_alignment,
1UL << s->blkshift);
bs->bl.max_pdiscard = (uint64_t)UINT32_MAX << s->blkshift;
bs->bl.pdiscard_alignment = MAX(bs->bl.request_alignment,
1UL << s->blkshift);
}
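A standalone sketch (not part of the patch) to sanity-check the limit arithmetic above; the 512-byte block size, i.e. blkshift = 9, is an assumption:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned blkshift = 9;                   /* assumed 512-byte blocks */
    uint64_t max = 1ULL << (blkshift + 16);  /* bs->bl.max_pwrite_zeroes */
    uint32_t cdw12 = ((max >> blkshift) - 1) & 0xFFFF;

    /* Shift and decrement of the largest allowed request gives 0xFFFF... */
    assert(cdw12 == 0xFFFF);
    /* ...and converting back loses no information. */
    assert((((uint64_t)cdw12 + 1) << blkshift) == max);
    printf("max_pwrite_zeroes=%" PRIu64 " cdw12=0x%" PRIX32 "\n", max, cdw12);
    return 0;
}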
static void nvme_detach_aio_context(BlockDriverState *bs)

View File

@ -227,15 +227,15 @@ static void preallocate_reopen_abort(BDRVReopenState *state)
}
static coroutine_fn int preallocate_co_preadv_part(
BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset, int flags)
BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags)
{
return bdrv_co_preadv_part(bs->file, offset, bytes, qiov, qiov_offset,
flags);
}
static int coroutine_fn preallocate_co_pdiscard(BlockDriverState *bs,
int64_t offset, int bytes)
int64_t offset, int64_t bytes)
{
return bdrv_co_pdiscard(bs->file, offset, bytes);
}
@ -337,7 +337,7 @@ static bool coroutine_fn handle_write(BlockDriverState *bs, int64_t offset,
}
static int coroutine_fn preallocate_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes, BdrvRequestFlags flags)
int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
bool want_merge_zero =
!(flags & ~(BDRV_REQ_ZERO_WRITE | BDRV_REQ_NO_FALLBACK));
@ -349,11 +349,11 @@ static int coroutine_fn preallocate_co_pwrite_zeroes(BlockDriverState *bs,
}
static coroutine_fn int preallocate_co_pwritev_part(BlockDriverState *bs,
uint64_t offset,
uint64_t bytes,
int64_t offset,
int64_t bytes,
QEMUIOVector *qiov,
size_t qiov_offset,
int flags)
BdrvRequestFlags flags)
{
handle_write(bs, offset, bytes, false);

View File

@ -617,9 +617,9 @@ static void qcow_refresh_limits(BlockDriverState *bs, Error **errp)
bs->bl.request_alignment = BDRV_SECTOR_SIZE;
}
static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov,
int flags)
static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
BDRVQcowState *s = bs->opaque;
int offset_in_cluster;
@ -714,9 +714,9 @@ static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset,
return ret;
}
static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov,
int flags)
static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
BDRVQcowState *s = bs->opaque;
int offset_in_cluster;
@ -1047,8 +1047,8 @@ static int qcow_make_empty(BlockDriverState *bs)
/* XXX: put compressed sectors first, then all the cluster aligned
tables to avoid losing bytes in alignment */
static coroutine_fn int
qcow_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov)
qcow_co_pwritev_compressed(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov)
{
BDRVQcowState *s = bs->opaque;
z_stream strm;

View File

@ -505,7 +505,20 @@ static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
return -ENOMEDIUM;
}
/* Call .bdrv_co_readv() directly instead of using the public block-layer
/*
* We never deal with requests that don't satisfy
* bdrv_check_qiov_request(), and aligning requests to clusters never
* breaks this condition. So, do some assertions before calling
* bs->drv->bdrv_co_preadv_part(), which takes int64_t arguments.
*/
assert(src_cluster_offset <= INT64_MAX);
assert(src_cluster_offset + offset_in_cluster <= INT64_MAX);
/* Cast qiov->size to uint64_t to silence a compiler warning on -m32 */
assert((uint64_t)qiov->size <= INT64_MAX);
bdrv_check_qiov_request(src_cluster_offset + offset_in_cluster, qiov->size,
qiov, 0, &error_abort);
/*
* Call .bdrv_co_readv() directly instead of using the public block-layer
* interface. This avoids double I/O throttling and request tracking,
* which can lead to deadlock when block layer copy-on-read is enabled.
*/

View File

@ -2310,9 +2310,10 @@ static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task)
}
static coroutine_fn int qcow2_co_preadv_part(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
int64_t offset, int64_t bytes,
QEMUIOVector *qiov,
size_t qiov_offset, int flags)
size_t qiov_offset,
BdrvRequestFlags flags)
{
BDRVQcow2State *s = bs->opaque;
int ret = 0;
@ -2596,8 +2597,8 @@ static coroutine_fn int qcow2_co_pwritev_task_entry(AioTask *task)
}
static coroutine_fn int qcow2_co_pwritev_part(
BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset, int flags)
BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags)
{
BDRVQcow2State *s = bs->opaque;
int offset_in_cluster;
@ -3940,7 +3941,7 @@ static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
}
static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes, BdrvRequestFlags flags)
int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
int ret;
BDRVQcow2State *s = bs->opaque;
@ -3995,7 +3996,7 @@ static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
}
static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs,
int64_t offset, int bytes)
int64_t offset, int64_t bytes)
{
int ret;
BDRVQcow2State *s = bs->opaque;
@ -4025,9 +4026,9 @@ static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs,
static int coroutine_fn
qcow2_co_copy_range_from(BlockDriverState *bs,
BdrvChild *src, uint64_t src_offset,
BdrvChild *dst, uint64_t dst_offset,
uint64_t bytes, BdrvRequestFlags read_flags,
BdrvChild *src, int64_t src_offset,
BdrvChild *dst, int64_t dst_offset,
int64_t bytes, BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
BDRVQcow2State *s = bs->opaque;
@ -4108,9 +4109,9 @@ out:
static int coroutine_fn
qcow2_co_copy_range_to(BlockDriverState *bs,
BdrvChild *src, uint64_t src_offset,
BdrvChild *dst, uint64_t dst_offset,
uint64_t bytes, BdrvRequestFlags read_flags,
BdrvChild *src, int64_t src_offset,
BdrvChild *dst, int64_t dst_offset,
int64_t bytes, BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
BDRVQcow2State *s = bs->opaque;
@ -4630,7 +4631,7 @@ static coroutine_fn int qcow2_co_pwritev_compressed_task_entry(AioTask *task)
*/
static coroutine_fn int
qcow2_co_pwritev_compressed_part(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
int64_t offset, int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset)
{
BDRVQcow2State *s = bs->opaque;
@ -5227,24 +5228,55 @@ static int qcow2_has_zero_init(BlockDriverState *bs)
}
}
/*
* Check the vmstate request. On success, return
* qcow2_vm_state_offset(s) + @pos
*/
static int64_t qcow2_check_vmstate_request(BlockDriverState *bs,
QEMUIOVector *qiov, int64_t pos)
{
BDRVQcow2State *s = bs->opaque;
int64_t vmstate_offset = qcow2_vm_state_offset(s);
int ret;
/* Incoming requests must be OK */
bdrv_check_qiov_request(pos, qiov->size, qiov, 0, &error_abort);
if (INT64_MAX - pos < vmstate_offset) {
return -EIO;
}
pos += vmstate_offset;
ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
if (ret < 0) {
return ret;
}
return pos;
}
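The overflow guard above is easy to miss; here is a minimal standalone sketch of the same pattern (the offsets used are made-up values):

#include <assert.h>
#include <errno.h>
#include <stdint.h>

/* Return vmstate_offset + pos, or -EIO if the sum would overflow int64_t. */
static int64_t checked_vmstate_offset(int64_t vmstate_offset, int64_t pos)
{
    if (INT64_MAX - pos < vmstate_offset) {
        return -EIO;
    }
    return vmstate_offset + pos;
}

int main(void)
{
    assert(checked_vmstate_offset(1 << 20, 4096) == (1 << 20) + 4096);
    assert(checked_vmstate_offset(INT64_MAX - 10, 11) == -EIO);
    return 0;
}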
static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
int64_t pos)
{
BDRVQcow2State *s = bs->opaque;
int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos);
if (offset < 0) {
return offset;
}
BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
return bs->drv->bdrv_co_pwritev_part(bs, qcow2_vm_state_offset(s) + pos,
qiov->size, qiov, 0, 0);
return bs->drv->bdrv_co_pwritev_part(bs, offset, qiov->size, qiov, 0, 0);
}
static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
int64_t pos)
{
BDRVQcow2State *s = bs->opaque;
int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos);
if (offset < 0) {
return offset;
}
BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
return bs->drv->bdrv_co_preadv_part(bs, qcow2_vm_state_offset(s) + pos,
qiov->size, qiov, 0, 0);
return bs->drv->bdrv_co_preadv_part(bs, offset, qiov->size, qiov, 0, 0);
}
/*

View File

@ -582,6 +582,7 @@ static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
BDRVQEDState *s = bs->opaque;
bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
bs->bl.max_pwrite_zeroes = QEMU_ALIGN_DOWN(INT_MAX, s->header.cluster_size);
}
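A quick numeric check of the clamp above (standalone sketch, not part of the patch; the 64 KiB cluster size is an assumption):

#include <assert.h>
#include <limits.h>
#include <stdint.h>

/* Same rounding as QEMU's QEMU_ALIGN_DOWN() */
#define ALIGN_DOWN(n, m) ((n) / (m) * (m))

int main(void)
{
    uint32_t cluster_size = 64 * 1024;          /* assumed */
    int64_t max = ALIGN_DOWN(INT_MAX, cluster_size);

    /* The clamp keeps requests both cluster-aligned and within INT_MAX. */
    assert(max % cluster_size == 0);
    assert(max <= INT_MAX);
    return 0;
}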
/* We have nothing to do for QED reopen, stubs just return
@ -1397,7 +1398,7 @@ static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs,
static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset,
int bytes,
int64_t bytes,
BdrvRequestFlags flags)
{
BDRVQEDState *s = bs->opaque;
@ -1408,6 +1409,12 @@ static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
*/
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);
/*
* QED is not prepared for 63-bit write-zeroes requests, so rely on
* max_pwrite_zeroes.
*/
assert(bytes <= INT_MAX);
/* Fall back if the request is not aligned */
if (qed_offset_into_cluster(s, offset) ||
qed_offset_into_cluster(s, bytes)) {

View File

@ -663,8 +663,8 @@ static int read_fifo_child(QuorumAIOCB *acb)
return ret;
}
static int quorum_co_preadv(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov, int flags)
static int quorum_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BDRVQuorumState *s = bs->opaque;
QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags);
@ -714,8 +714,9 @@ static void write_quorum_entry(void *opaque)
}
}
static int quorum_co_pwritev(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov, int flags)
static int quorum_co_pwritev(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
BDRVQuorumState *s = bs->opaque;
QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags);
@ -745,7 +746,7 @@ static int quorum_co_pwritev(BlockDriverState *bs, uint64_t offset,
}
static int quorum_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
int bytes, BdrvRequestFlags flags)
int64_t bytes, BdrvRequestFlags flags)
{
return quorum_co_pwritev(bs, offset, bytes, NULL,

View File

@ -181,8 +181,8 @@ static void raw_reopen_abort(BDRVReopenState *state)
}
/* Check and adjust the offset against the 'offset' and 'size' options. */
static inline int raw_adjust_offset(BlockDriverState *bs, uint64_t *offset,
uint64_t bytes, bool is_write)
static inline int raw_adjust_offset(BlockDriverState *bs, int64_t *offset,
int64_t bytes, bool is_write)
{
BDRVRawState *s = bs->opaque;
@ -201,9 +201,9 @@ static inline int raw_adjust_offset(BlockDriverState *bs, uint64_t *offset,
return 0;
}
static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov,
int flags)
static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
int ret;
@ -216,9 +216,9 @@ static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset,
return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
}
static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov,
int flags)
static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
void *buf = NULL;
BlockDriver *drv;
@ -289,12 +289,12 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
}
static int coroutine_fn raw_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes,
int64_t offset, int64_t bytes,
BdrvRequestFlags flags)
{
int ret;
ret = raw_adjust_offset(bs, (uint64_t *)&offset, bytes, true);
ret = raw_adjust_offset(bs, &offset, bytes, true);
if (ret) {
return ret;
}
@ -302,11 +302,11 @@ static int coroutine_fn raw_co_pwrite_zeroes(BlockDriverState *bs,
}
static int coroutine_fn raw_co_pdiscard(BlockDriverState *bs,
int64_t offset, int bytes)
int64_t offset, int64_t bytes)
{
int ret;
ret = raw_adjust_offset(bs, (uint64_t *)&offset, bytes, true);
ret = raw_adjust_offset(bs, &offset, bytes, true);
if (ret) {
return ret;
}
@ -532,10 +532,10 @@ static int raw_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
static int coroutine_fn raw_co_copy_range_from(BlockDriverState *bs,
BdrvChild *src,
uint64_t src_offset,
int64_t src_offset,
BdrvChild *dst,
uint64_t dst_offset,
uint64_t bytes,
int64_t dst_offset,
int64_t bytes,
BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
@ -551,10 +551,10 @@ static int coroutine_fn raw_co_copy_range_from(BlockDriverState *bs,
static int coroutine_fn raw_co_copy_range_to(BlockDriverState *bs,
BdrvChild *src,
uint64_t src_offset,
int64_t src_offset,
BdrvChild *dst,
uint64_t dst_offset,
uint64_t bytes,
int64_t dst_offset,
int64_t bytes,
BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{

View File

@ -97,6 +97,12 @@ typedef struct RBDTask {
int64_t ret;
} RBDTask;
typedef struct RBDDiffIterateReq {
uint64_t offs;
uint64_t bytes;
bool exists;
} RBDDiffIterateReq;
static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx,
BlockdevOptionsRbd *opts, bool cache,
const char *keypairs, const char *secretid,
@ -1164,17 +1170,17 @@ static int coroutine_fn qemu_rbd_start_co(BlockDriverState *bs,
}
static int
coroutine_fn qemu_rbd_co_preadv(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov,
int flags)
coroutine_fn qemu_rbd_co_preadv(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
return qemu_rbd_start_co(bs, offset, bytes, qiov, flags, RBD_AIO_READ);
}
static int
coroutine_fn qemu_rbd_co_pwritev(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov,
int flags)
coroutine_fn qemu_rbd_co_pwritev(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
BDRVRBDState *s = bs->opaque;
/*
@ -1197,17 +1203,17 @@ static int coroutine_fn qemu_rbd_co_flush(BlockDriverState *bs)
}
static int coroutine_fn qemu_rbd_co_pdiscard(BlockDriverState *bs,
int64_t offset, int count)
int64_t offset, int64_t bytes)
{
return qemu_rbd_start_co(bs, offset, count, NULL, 0, RBD_AIO_DISCARD);
return qemu_rbd_start_co(bs, offset, bytes, NULL, 0, RBD_AIO_DISCARD);
}
#ifdef LIBRBD_SUPPORTS_WRITE_ZEROES
static int
coroutine_fn qemu_rbd_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
int count, BdrvRequestFlags flags)
int64_t bytes, BdrvRequestFlags flags)
{
return qemu_rbd_start_co(bs, offset, count, NULL, flags,
return qemu_rbd_start_co(bs, offset, bytes, NULL, flags,
RBD_AIO_WRITE_ZEROES);
}
#endif
@ -1259,6 +1265,111 @@ static ImageInfoSpecific *qemu_rbd_get_specific_info(BlockDriverState *bs,
return spec_info;
}
/*
* rbd_diff_iterate2 lets the callback routine interrupt execution by
* returning a negative value. Choose a value that does not conflict with
* any existing error code, and return it when we want to stop iterating
* early because we detected a change in the allocation status.
*/
#define QEMU_RBD_EXIT_DIFF_ITERATE2 -9000
static int qemu_rbd_diff_iterate_cb(uint64_t offs, size_t len,
int exists, void *opaque)
{
RBDDiffIterateReq *req = opaque;
assert(req->offs + req->bytes <= offs);
/*
* We do not diff against a snapshot, so we should never receive a callback
* for a hole.
*/
assert(exists);
if (!req->exists && offs > req->offs) {
/*
* We started in an unallocated area and hit the first allocated
* block. req->bytes must be set to the length of the unallocated area
* before the allocated area. Stop further processing.
*/
req->bytes = offs - req->offs;
return QEMU_RBD_EXIT_DIFF_ITERATE2;
}
if (req->exists && offs > req->offs + req->bytes) {
/*
* We started in an allocated area and jumped over an unallocated area;
* req->bytes contains the length of the allocated area before the
* unallocated area. Stop further processing.
*/
return QEMU_RBD_EXIT_DIFF_ITERATE2;
}
req->bytes += len;
req->exists = true;
return 0;
}
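To illustrate the early-exit protocol above, a standalone simulation of how the callback classifies a run that starts in an unallocated area (the extent offsets are invented, and STOP stands in for QEMU_RBD_EXIT_DIFF_ITERATE2):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define STOP (-9000)

typedef struct { uint64_t offs, bytes; bool exists; } Req;

static int cb(uint64_t offs, size_t len, Req *req)
{
    if (!req->exists && offs > req->offs) {
        req->bytes = offs - req->offs;   /* length of the leading hole */
        return STOP;
    }
    if (req->exists && offs > req->offs + req->bytes) {
        return STOP;                     /* jumped over a hole */
    }
    req->bytes += len;
    req->exists = true;
    return 0;
}

int main(void)
{
    /* Query starts at offset 0; first allocated extent begins at 4096. */
    Req req = { .offs = 0 };
    assert(cb(4096, 4096, &req) == STOP);
    assert(!req.exists && req.bytes == 4096); /* leading hole is 4 KiB */
    return 0;
}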
static int coroutine_fn qemu_rbd_co_block_status(BlockDriverState *bs,
bool want_zero, int64_t offset,
int64_t bytes, int64_t *pnum,
int64_t *map,
BlockDriverState **file)
{
BDRVRBDState *s = bs->opaque;
int status, r;
RBDDiffIterateReq req = { .offs = offset };
uint64_t features, flags;
assert(offset + bytes <= s->image_size);
/* default to all sectors allocated */
status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
*map = offset;
*file = bs;
*pnum = bytes;
/* check if RBD image supports fast-diff */
r = rbd_get_features(s->image, &features);
if (r < 0) {
return status;
}
if (!(features & RBD_FEATURE_FAST_DIFF)) {
return status;
}
/* check if RBD fast-diff result is valid */
r = rbd_get_flags(s->image, &flags);
if (r < 0) {
return status;
}
if (flags & RBD_FLAG_FAST_DIFF_INVALID) {
return status;
}
r = rbd_diff_iterate2(s->image, NULL, offset, bytes, true, true,
qemu_rbd_diff_iterate_cb, &req);
if (r < 0 && r != QEMU_RBD_EXIT_DIFF_ITERATE2) {
return status;
}
assert(req.bytes <= bytes);
if (!req.exists) {
if (r == 0) {
/*
* rbd_diff_iterate2 does not invoke callbacks for unallocated
* areas. This here catches the case where no callback was
* invoked at all (req.bytes == 0).
*/
assert(req.bytes == 0);
req.bytes = bytes;
}
status = BDRV_BLOCK_ZERO | BDRV_BLOCK_OFFSET_VALID;
}
*pnum = req.bytes;
return status;
}
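Note the defensive structure above: every failure to consult fast-diff, including rbd_get_features()/rbd_get_flags() errors and an invalidated fast-diff map, falls back to reporting the range as allocated data, which is always a safe answer for block-status callers.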
static int64_t qemu_rbd_getlength(BlockDriverState *bs)
{
BDRVRBDState *s = bs->opaque;
@ -1494,6 +1605,7 @@ static BlockDriver bdrv_rbd = {
#ifdef LIBRBD_SUPPORTS_WRITE_ZEROES
.bdrv_co_pwrite_zeroes = qemu_rbd_co_pwrite_zeroes,
#endif
.bdrv_co_block_status = qemu_rbd_co_block_status,
.bdrv_snapshot_create = qemu_rbd_snap_create,
.bdrv_snapshot_delete = qemu_rbd_snap_remove,

View File

@ -149,7 +149,7 @@ static void replication_close(BlockDriverState *bs)
if (s->stage == BLOCK_REPLICATION_FAILOVER) {
commit_job = &s->commit_job->job;
assert(commit_job->aio_context == qemu_get_current_aio_context());
job_cancel_sync(commit_job);
job_cancel_sync(commit_job, false);
}
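Note: the new second argument to job_cancel_sync() appears to be a force flag introduced elsewhere in this series; here the commit job is cancelled gracefully (false), while the backup job below and the jobs in blockdev.c are force-cancelled (true). The exact semantics are inferred from these call sites.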
if (s->mode == REPLICATION_MODE_SECONDARY) {
@ -726,7 +726,7 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
* disk, secondary disk in backup_job_completed().
*/
if (s->backup_job) {
job_cancel_sync(&s->backup_job->job);
job_cancel_sync(&s->backup_job->job, true);
}
if (!failover) {

View File

@ -112,8 +112,9 @@ static int64_t throttle_getlength(BlockDriverState *bs)
}
static int coroutine_fn throttle_co_preadv(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
int64_t offset, int64_t bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
ThrottleGroupMember *tgm = bs->opaque;
@ -123,8 +124,9 @@ static int coroutine_fn throttle_co_preadv(BlockDriverState *bs,
}
static int coroutine_fn throttle_co_pwritev(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
int64_t offset, int64_t bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
ThrottleGroupMember *tgm = bs->opaque;
throttle_group_co_io_limits_intercept(tgm, bytes, true);
@ -133,7 +135,7 @@ static int coroutine_fn throttle_co_pwritev(BlockDriverState *bs,
}
static int coroutine_fn throttle_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes,
int64_t offset, int64_t bytes,
BdrvRequestFlags flags)
{
ThrottleGroupMember *tgm = bs->opaque;
@ -143,7 +145,7 @@ static int coroutine_fn throttle_co_pwrite_zeroes(BlockDriverState *bs,
}
static int coroutine_fn throttle_co_pdiscard(BlockDriverState *bs,
int64_t offset, int bytes)
int64_t offset, int64_t bytes)
{
ThrottleGroupMember *tgm = bs->opaque;
throttle_group_co_io_limits_intercept(tgm, bytes, true);
@ -152,8 +154,8 @@ static int coroutine_fn throttle_co_pdiscard(BlockDriverState *bs,
}
static int coroutine_fn throttle_co_pwritev_compressed(BlockDriverState *bs,
uint64_t offset,
uint64_t bytes,
int64_t offset,
int64_t bytes,
QEMUIOVector *qiov)
{
return throttle_co_pwritev(bs, offset, bytes, qiov,

View File

@ -5,8 +5,8 @@ bdrv_open_common(void *bs, const char *filename, int flags, const char *format_n
bdrv_lock_medium(void *bs, bool locked) "bs %p locked %d"
# block-backend.c
blk_co_preadv(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x"
blk_co_pwritev(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x"
blk_co_preadv(void *blk, void *bs, int64_t offset, int64_t bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %" PRId64 " flags 0x%x"
blk_co_pwritev(void *blk, void *bs, int64_t offset, int64_t bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %" PRId64 " flags 0x%x"
blk_root_attach(void *child, void *blk, void *bs) "child %p blk %p bs %p"
blk_root_detach(void *child, void *blk, void *bs) "child %p blk %p bs %p"
@ -75,13 +75,13 @@ luring_resubmit_short_read(void *s, void *luringcb, int nread) "LuringState %p l
# qcow2.c
qcow2_add_task(void *co, void *bs, void *pool, const char *action, int cluster_type, uint64_t host_offset, uint64_t offset, uint64_t bytes, void *qiov, size_t qiov_offset) "co %p bs %p pool %p: %s: cluster_type %d file_cluster_offset %" PRIu64 " offset %" PRIu64 " bytes %" PRIu64 " qiov %p qiov_offset %zu"
qcow2_writev_start_req(void *co, int64_t offset, int bytes) "co %p offset 0x%" PRIx64 " bytes %d"
qcow2_writev_start_req(void *co, int64_t offset, int64_t bytes) "co %p offset 0x%" PRIx64 " bytes %" PRId64
qcow2_writev_done_req(void *co, int ret) "co %p ret %d"
qcow2_writev_start_part(void *co) "co %p"
qcow2_writev_done_part(void *co, int cur_bytes) "co %p cur_bytes %d"
qcow2_writev_data(void *co, uint64_t offset) "co %p offset 0x%" PRIx64
qcow2_pwrite_zeroes_start_req(void *co, int64_t offset, int count) "co %p offset 0x%" PRIx64 " count %d"
qcow2_pwrite_zeroes(void *co, int64_t offset, int count) "co %p offset 0x%" PRIx64 " count %d"
qcow2_pwrite_zeroes_start_req(void *co, int64_t offset, int64_t bytes) "co %p offset 0x%" PRIx64 " bytes %" PRId64
qcow2_pwrite_zeroes(void *co, int64_t offset, int64_t bytes) "co %p offset 0x%" PRIx64 " bytes %" PRId64
qcow2_skip_cow(void *co, uint64_t offset, int nb_clusters) "co %p offset 0x%" PRIx64 " nb_clusters %d"
# qcow2-cluster.c
@ -152,12 +152,12 @@ nvme_write_zeroes(void *s, uint64_t offset, uint64_t bytes, int flags) "s %p off
nvme_qiov_unaligned(const void *qiov, int n, void *base, size_t size, int align) "qiov %p n %d base %p size 0x%zx align 0x%x"
nvme_prw_buffered(void *s, uint64_t offset, uint64_t bytes, int niov, int is_write) "s %p offset 0x%"PRIx64" bytes %"PRId64" niov %d is_write %d"
nvme_rw_done(void *s, int is_write, uint64_t offset, uint64_t bytes, int ret) "s %p is_write %d offset 0x%"PRIx64" bytes %"PRId64" ret %d"
nvme_dsm(void *s, uint64_t offset, uint64_t bytes) "s %p offset 0x%"PRIx64" bytes %"PRId64""
nvme_dsm_done(void *s, uint64_t offset, uint64_t bytes, int ret) "s %p offset 0x%"PRIx64" bytes %"PRId64" ret %d"
nvme_dsm(void *s, int64_t offset, int64_t bytes) "s %p offset 0x%"PRIx64" bytes %"PRId64""
nvme_dsm_done(void *s, int64_t offset, int64_t bytes, int ret) "s %p offset 0x%"PRIx64" bytes %"PRId64" ret %d"
nvme_dma_map_flush(void *s) "s %p"
nvme_free_req_queue_wait(void *s, unsigned q_index) "s %p q #%u"
nvme_create_queue_pair(unsigned q_index, void *q, size_t size, void *aio_context, int fd) "index %u q %p size %zu aioctx %p fd %d"
nvme_free_queue_pair(unsigned q_index, void *q) "index %u q %p"
nvme_free_queue_pair(unsigned q_index, void *q, void *cq, void *sq) "index %u q %p cq %p sq %p"
nvme_cmd_map_qiov(void *s, void *cmd, void *req, void *qiov, int entries) "s %p cmd %p req %p qiov %p entries %d"
nvme_cmd_map_qiov_pages(void *s, int i, uint64_t page) "s %p page[%d] 0x%"PRIx64
nvme_cmd_map_qiov_iov(void *s, int i, void *page, int pages) "s %p iov[%d] %p pages %d"

View File

@ -544,8 +544,8 @@ static int coroutine_fn vdi_co_block_status(BlockDriverState *bs,
}
static int coroutine_fn
vdi_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
vdi_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BDRVVdiState *s = bs->opaque;
QEMUIOVector local_qiov;
@ -600,8 +600,8 @@ vdi_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
}
static int coroutine_fn
vdi_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
vdi_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BDRVVdiState *s = bs->opaque;
QEMUIOVector local_qiov;

View File

@ -60,6 +60,7 @@
#define VMDK_ZEROED (-3)
#define BLOCK_OPT_ZEROED_GRAIN "zeroed_grain"
#define BLOCK_OPT_TOOLSVERSION "toolsversion"
typedef struct {
uint32_t version;
@ -1888,8 +1889,8 @@ static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
}
static int coroutine_fn
vmdk_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
vmdk_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BDRVVmdkState *s = bs->opaque;
int ret;
@ -2068,8 +2069,8 @@ static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset,
}
static int coroutine_fn
vmdk_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
vmdk_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
int ret;
BDRVVmdkState *s = bs->opaque;
@ -2080,8 +2081,8 @@ vmdk_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
}
static int coroutine_fn
vmdk_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov)
vmdk_co_pwritev_compressed(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov)
{
if (bytes == 0) {
/* The caller will write bytes 0 to signal EOF.
@ -2109,7 +2110,7 @@ vmdk_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
static int coroutine_fn vmdk_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset,
int bytes,
int64_t bytes,
BdrvRequestFlags flags)
{
int ret;
@ -2344,6 +2345,7 @@ static int coroutine_fn vmdk_co_do_create(int64_t size,
BlockdevVmdkAdapterType adapter_type,
const char *backing_file,
const char *hw_version,
const char *toolsversion,
bool compat6,
bool zeroed_grain,
vmdk_create_extent_fn extent_fn,
@ -2384,7 +2386,8 @@ static int coroutine_fn vmdk_co_do_create(int64_t size,
"ddb.geometry.cylinders = \"%" PRId64 "\"\n"
"ddb.geometry.heads = \"%" PRIu32 "\"\n"
"ddb.geometry.sectors = \"63\"\n"
"ddb.adapterType = \"%s\"\n";
"ddb.adapterType = \"%s\"\n"
"ddb.toolsVersion = \"%s\"\n";
ext_desc_lines = g_string_new(NULL);
@ -2401,6 +2404,9 @@ static int coroutine_fn vmdk_co_do_create(int64_t size,
if (!hw_version) {
hw_version = "4";
}
if (!toolsversion) {
toolsversion = "2147483647";
}
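As a hedged usage note (not part of the patch): once this option is registered in vmdk_create_opts below, an invocation along the lines of "qemu-img create -f vmdk -o toolsversion=2147483647 disk.vmdk 1G" should embed a matching ddb.toolsVersion line in the descriptor; the exact command line is an assumption based on the option name.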
if (adapter_type != BLOCKDEV_VMDK_ADAPTER_TYPE_IDE) {
/* that's the number of heads with which vmware operates when
@ -2525,7 +2531,8 @@ static int coroutine_fn vmdk_co_do_create(int64_t size,
size /
(int64_t)(63 * number_heads * BDRV_SECTOR_SIZE),
number_heads,
BlockdevVmdkAdapterType_str(adapter_type));
BlockdevVmdkAdapterType_str(adapter_type),
toolsversion);
desc_len = strlen(desc);
/* the descriptor offset = 0x200 */
if (!split && !flat) {
@ -2617,6 +2624,7 @@ static int coroutine_fn vmdk_co_create_opts(BlockDriver *drv,
BlockdevVmdkAdapterType adapter_type_enum;
char *backing_file = NULL;
char *hw_version = NULL;
char *toolsversion = NULL;
char *fmt = NULL;
BlockdevVmdkSubformat subformat;
int ret = 0;
@ -2649,6 +2657,7 @@ static int coroutine_fn vmdk_co_create_opts(BlockDriver *drv,
adapter_type = qemu_opt_get_del(opts, BLOCK_OPT_ADAPTER_TYPE);
backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
hw_version = qemu_opt_get_del(opts, BLOCK_OPT_HWVERSION);
toolsversion = qemu_opt_get_del(opts, BLOCK_OPT_TOOLSVERSION);
compat6 = qemu_opt_get_bool_del(opts, BLOCK_OPT_COMPAT6, false);
if (strcmp(hw_version, "undefined") == 0) {
g_free(hw_version);
@ -2692,14 +2701,15 @@ static int coroutine_fn vmdk_co_create_opts(BlockDriver *drv,
.opts = opts,
};
ret = vmdk_co_do_create(total_size, subformat, adapter_type_enum,
backing_file, hw_version, compat6, zeroed_grain,
vmdk_co_create_opts_cb, &data, errp);
backing_file, hw_version, toolsversion, compat6,
zeroed_grain, vmdk_co_create_opts_cb, &data, errp);
exit:
g_free(backing_fmt);
g_free(adapter_type);
g_free(backing_file);
g_free(hw_version);
g_free(toolsversion);
g_free(fmt);
g_free(desc);
g_free(path);
@ -2782,6 +2792,7 @@ static int coroutine_fn vmdk_co_create(BlockdevCreateOptions *create_options,
opts->adapter_type,
opts->backing_file,
opts->hwversion,
opts->toolsversion,
false,
opts->zeroed_grain,
vmdk_co_create_cb,
@ -3031,6 +3042,11 @@ static QemuOptsList vmdk_create_opts = {
.help = "VMDK hardware version",
.def_value_str = "undefined"
},
{
.name = BLOCK_OPT_TOOLSVERSION,
.type = QEMU_OPT_STRING,
.help = "VMware guest tools version",
},
{
.name = BLOCK_OPT_SUBFMT,
.type = QEMU_OPT_STRING,

View File

@ -276,7 +276,8 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
if (ret < 0) {
goto fail;
}
if (strncmp(footer->creator, "conectix", 8)) {
if (strncmp(footer->creator, "conectix", 8) ||
be32_to_cpu(footer->type) != VHD_FIXED) {
error_setg(errp, "invalid VPC image");
ret = -EINVAL;
goto fail;
@ -608,8 +609,8 @@ static int vpc_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
}
static int coroutine_fn
vpc_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
vpc_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BDRVVPCState *s = bs->opaque;
int ret;
@ -658,8 +659,8 @@ fail:
}
static int coroutine_fn
vpc_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
vpc_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BDRVVPCState *s = bs->opaque;
int64_t image_offset;

View File

@ -1522,8 +1522,8 @@ static int vvfat_read(BlockDriverState *bs, int64_t sector_num,
}
static int coroutine_fn
vvfat_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
vvfat_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
int ret;
BDRVVVFATState *s = bs->opaque;
@ -3061,8 +3061,8 @@ DLOG(checkpoint());
}
static int coroutine_fn
vvfat_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
vvfat_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
int ret;
BDRVVVFATState *s = bs->opaque;

View File

@ -1847,7 +1847,7 @@ static void drive_backup_abort(BlkActionState *common)
aio_context = bdrv_get_aio_context(state->bs);
aio_context_acquire(aio_context);
job_cancel_sync(&state->job->job);
job_cancel_sync(&state->job->job, true);
aio_context_release(aio_context);
}
@ -1948,7 +1948,7 @@ static void blockdev_backup_abort(BlkActionState *common)
aio_context = bdrv_get_aio_context(state->bs);
aio_context_acquire(aio_context);
job_cancel_sync(&state->job->job);
job_cancel_sync(&state->job->job, true);
aio_context_release(aio_context);
}

View File

@ -1,6 +1,3 @@
/* $OpenBSD: errno.h,v 1.20 2007/09/03 14:37:52 millert Exp $ */
/* $NetBSD: errno.h,v 1.10 1996/01/20 01:33:53 jtc Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1993
* The Regents of the University of California. All rights reserved.
@ -37,6 +34,9 @@
* @(#)errno.h 8.5 (Berkeley) 1/21/94
*/
#ifndef _ERRNO_DEFS_H_
#define _ERRNO_DEFS_H_
#define TARGET_EPERM 1 /* Operation not permitted */
#define TARGET_ENOENT 2 /* No such file or directory */
#define TARGET_ESRCH 3 /* No such process */
@ -147,3 +147,10 @@
#define TARGET_EIDRM 89 /* Identifier removed */
#define TARGET_ENOMSG 90 /* No message of desired type */
#define TARGET_ELAST 90 /* Must be equal largest errno */
/* Internal errors: */
#define TARGET_EJUSTRETURN 254 /* Just return without modifying regs */
#define TARGET_ERESTART 255 /* Restart syscall */
#define TARGET_ERESTARTSYS TARGET_ERESTART /* Linux compat */
#endif /* ! _ERRNO_DEFS_H_ */

View File

@ -0,0 +1,3 @@
bsd_user_ss.add(files(
'os-sys.c',
))

27
bsd-user/freebsd/os-sys.c Normal file
View File

@ -0,0 +1,27 @@
/*
* FreeBSD sysctl() and sysarch() system call emulation
*
* Copyright (c) 2013-15 Stacey D. Son
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu.h"
#include "target_arch_sysarch.h"
/* sysarch() is architecture dependent. */
abi_long do_freebsd_sysarch(void *cpu_env, abi_long arg1, abi_long arg2)
{
return do_freebsd_arch_sysarch(cpu_env, arg1, arg2);
}

View File

@ -33,10 +33,6 @@
{ TARGET_FREEBSD_NR___syscall, "__syscall", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR___sysctl, "__sysctl", NULL, print_sysctl, NULL },
{ TARGET_FREEBSD_NR__umtx_op, "_umtx_op", "%s(%#x, %d, %d, %#x, %#x)", NULL, NULL },
#if defined(__FreeBSD_version) && __FreeBSD_version < 1000000
{ TARGET_FREEBSD_NR__umtx_lock, "__umtx_lock", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR__umtx_unlock, "__umtx_unlock", NULL, NULL, NULL },
#endif
{ TARGET_FREEBSD_NR_accept, "accept", "%s(%d,%#x,%#x)", NULL, NULL },
{ TARGET_FREEBSD_NR_accept4, "accept4", "%s(%d,%d,%#x,%#x)", NULL, NULL },
{ TARGET_FREEBSD_NR_access, "access", "%s(\"%s\",%#o)", NULL, NULL },
@ -49,10 +45,6 @@
{ TARGET_FREEBSD_NR_cap_fcntls_get, "cap_fcntls_get", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_cap_fcntls_limit, "cap_fcntls_limit", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_cap_getmode, "cap_getmode", NULL, NULL, NULL },
#if defined(__FreeBSD_version) && __FreeBSD_version < 1000000
{ TARGET_FREEBSD_NR_cap_getrights, "cap_getrights", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_cap_new, "cap_new", NULL, NULL, NULL },
#endif
{ TARGET_FREEBSD_NR_cap_ioctls_get, "cap_ioctls_get", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_cap_ioctls_limit, "cap_ioctls_limit", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_cap_rights_limit, "cap_rights_limit", NULL, NULL, NULL },
@ -146,9 +138,6 @@
{ TARGET_FREEBSD_NR_freebsd11_kevent, "freebsd11_kevent", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_kevent, "kevent", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_kill, "kill", NULL, NULL, NULL },
#if defined(__FreeBSD_version) && __FreeBSD_version < 1000000
{ TARGET_FREEBSD_NR_killpg, "killpg", NULL, NULL, NULL },
#endif
{ TARGET_FREEBSD_NR_kqueue, "kqueue", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_ktrace, "ktrace", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_lchown, "lchown", NULL, NULL, NULL },

View File

@ -38,10 +38,6 @@
#define ELF_PLATFORM (NULL)
#endif
#ifndef ELF_HWCAP
#define ELF_HWCAP 0
#endif
/* XXX Look at the other conflicting AT_* values. */
#define FREEBSD_AT_NCPUS 19
#define FREEBSD_AT_HWCAP 25
@ -114,12 +110,16 @@ static abi_ulong target_create_elf_tables(abi_ulong p, int argc, int envc,
NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
NEW_AUX_ENT(FREEBSD_AT_NCPUS, (abi_ulong)bsd_get_ncpu());
NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
features = ELF_HWCAP;
NEW_AUX_ENT(FREEBSD_AT_HWCAP, features);
#ifdef ELF_HWCAP2
features = ELF_HWCAP2;
NEW_AUX_ENT(FREEBSD_AT_HWCAP2, features);
#endif
NEW_AUX_ENT(AT_UID, (abi_ulong)getuid());
NEW_AUX_ENT(AT_EUID, (abi_ulong)geteuid());
NEW_AUX_ENT(AT_GID, (abi_ulong)getgid());
NEW_AUX_ENT(AT_EGID, (abi_ulong)getegid());
features = ELF_HWCAP;
NEW_AUX_ENT(FREEBSD_AT_HWCAP, features);
target_auxents = sp; /* Note where the aux entries are in the target */
#ifdef ARCH_DLINFO
/*

View File

@ -1,6 +1,9 @@
#ifndef _TARGET_OS_SIGNAL_H_
#define _TARGET_OS_SIGNAL_H_
/* FreeBSD's sys/ucontext.h defines this */
#define TARGET_MC_GET_CLEAR_RET 0x0001
#include "target_os_siginfo.h"
#include "target_arch_signal.h"

View File

@ -61,15 +61,7 @@ struct target_sockaddr_storage {
/*
* from sys/user.h
*/
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031
#define TARGET_KI_NSPARE_INT 2
#elif defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
#define TARGET_KI_NSPARE_INT 4
#elif defined(__FreeBSD_version) && __FreeBSD_version >= 1000000
#define TARGET_KI_NSPARE_INT 7
#else
#define TARGET_KI_NSPARE_INT 9
#endif /* ! __FreeBSD_version >= 1000000 */
#define TARGET_KI_NSPARE_LONG 12
#define TARGET_KI_NSPARE_PTR 6
@ -116,11 +108,7 @@ struct target_kinfo_proc {
int32_t ki_tsid; /* Terminal session ID */
int16_t ki_jobc; /* job control counter */
int16_t ki_spare_short1; /* unused (just here for alignment) */
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031
int32_t ki_tdev__freebsd11; /* controlling tty dev */
#else
int32_t ki_tdev; /* controlling tty dev */
#endif
target_sigset_t ki_siglist; /* Signals arrived but not delivered */
target_sigset_t ki_sigmask; /* Current signal mask */
target_sigset_t ki_sigignore; /* Signals being ignored */
@ -164,45 +152,24 @@ struct target_kinfo_proc {
int8_t ki_nice; /* Process "nice" value */
char ki_lock; /* Process lock (prevent swap) count */
char ki_rqindex; /* Run queue index */
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
u_char ki_oncpu_old; /* Which cpu we are on (legacy) */
u_char ki_lastcpu_old; /* Last cpu we were on (legacy) */
#else
u_char ki_oncpu; /* Which cpu we are on */
u_char ki_lastcpu; /* Last cpu we were on */
#endif /* ! __FreeBSD_version >= 1100000 */
#if defined(__FreeBSD_version) && __FreeBSD_version >= 900000
char ki_tdname[TARGET_TDNAMLEN + 1]; /* thread name */
#else
char ki_ocomm[TARGET_TDNAMLEN + 1]; /* thread name */
#endif /* ! __FreeBSD_version >= 900000 */
char ki_wmesg[TARGET_WMESGLEN + 1]; /* wchan message */
char ki_login[TARGET_LOGNAMELEN + 1]; /* setlogin name */
char ki_lockname[TARGET_LOCKNAMELEN + 1]; /* lock name */
char ki_comm[TARGET_COMMLEN + 1]; /* command name */
char ki_emul[TARGET_KI_EMULNAMELEN + 1]; /* emulation name */
#if defined(__FreeBSD_version) && __FreeBSD_version >= 900000
char ki_loginclass[TARGET_LOGINCLASSLEN + 1]; /* login class */
#endif /* ! __FreeBSD_version >= 900000 */
#if defined(__FreeBSD_version) && __FreeBSD_version >= 900000
char ki_sparestrings[50]; /* spare string space */
#else
char ki_sparestrings[68]; /* spare string space */
#endif /* ! __FreeBSD_version >= 900000 */
int32_t ki_spareints[TARGET_KI_NSPARE_INT]; /* spare room for growth */
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031
uint64_t ki_tdev; /* controlling tty dev */
#endif
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
int32_t ki_oncpu; /* Which cpu we are on */
int32_t ki_lastcpu; /* Last cpu we were on */
int32_t ki_tracer; /* Pid of tracing process */
#endif /* __FreeBSD_version >= 1100000 */
#if defined(__FreeBSD_version) && __FreeBSD_version >= 900000
int32_t ki_flag2; /* P2_* flags */
int32_t ki_fibnum; /* Default FIB number */
#endif /* ! __FreeBSD_version >= 900000 */
uint32_t ki_cr_flags; /* Credential flags */
int32_t ki_jid; /* Process jail ID */
int32_t ki_numthreads; /* XXXKSE number of threads in total */
@ -234,18 +201,8 @@ struct target_kinfo_file {
int32_t kf_flags; /* Flags. */
int32_t kf_pad0; /* Round to 64 bit alignment. */
int64_t kf_offset; /* Seek location. */
#if defined(__FreeBSD_version) && __FreeBSD_version < 1200031
int32_t kf_vnode_type; /* Vnode type. */
int32_t kf_sock_domain; /* Socket domain. */
int32_t kf_sock_type; /* Socket type. */
int32_t kf_sock_protocol; /* Socket protocol. */
struct target_sockaddr_storage kf_sa_local; /* Socket address. */
struct target_sockaddr_storage kf_sa_peer; /* Peer address. */
#endif
#if defined(__FreeBSD_version) && __FreeBSD_version >= 900000
union {
struct {
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031
uint32_t kf_spareint;
/* Socket domain. */
int kf_sock_domain0;
@ -257,7 +214,6 @@ struct target_kinfo_file {
struct sockaddr_storage kf_sa_local;
/* Peer address. */
struct sockaddr_storage kf_sa_peer;
#endif
/* Address of so_pcb. */
uint64_t kf_sock_pcb;
/* Address of inp_ppcb. */
@ -272,7 +228,6 @@ struct target_kinfo_file {
uint32_t kf_sock_pad0;
} kf_sock;
struct {
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031
/* Vnode type. */
int kf_file_type;
/* Space for future use */
@ -290,16 +245,6 @@ struct target_kinfo_file {
uint32_t kf_file_fsid_freebsd11;
/* File device, FreeBSD 11 compat. */
uint32_t kf_file_rdev_freebsd11;
#else
/* Global file id. */
uint64_t kf_file_fileid;
/* File size. */
uint64_t kf_file_size;
/* Vnode filesystem id. */
uint32_t kf_file_fsid;
/* File device. */
uint32_t kf_file_rdev;
#endif
/* File mode. */
uint16_t kf_file_mode;
/* Round to 64 bit alignment. */
@ -307,18 +252,14 @@ struct target_kinfo_file {
uint32_t kf_file_pad1;
} kf_file;
struct {
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031
uint32_t kf_spareint[4];
uint64_t kf_spareint64[32];
#endif
uint32_t kf_sem_value;
uint16_t kf_sem_mode;
} kf_sem;
struct {
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031
uint32_t kf_spareint[4];
uint64_t kf_spareint64[32];
#endif
uint64_t kf_pipe_addr;
uint64_t kf_pipe_peer;
uint32_t kf_pipe_buffer_cnt;
@ -326,7 +267,6 @@ struct target_kinfo_file {
uint32_t kf_pipe_pad0[3];
} kf_pipe;
struct {
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031
uint32_t kf_spareint[4];
uint64_t kf_spareint64[32];
uint32_t kf_pts_dev_freebsd11;
@ -334,34 +274,18 @@ struct target_kinfo_file {
uint64_t kf_pts_dev;
/* Round to 64 bit alignment. */
uint32_t kf_pts_pad1[4];
#else
uint32_t kf_pts_dev;
/* Round to 64 bit alignment. */
uint32_t kf_pts_pad0[7];
#endif
} kf_pts;
struct {
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031
uint32_t kf_spareint[4];
uint64_t kf_spareint64[32];
#endif
int32_t kf_pid;
} kf_proc;
} kf_un;
uint16_t kf_status; /* Status flags. */
uint16_t kf_pad1; /* Round to 32 bit alignment. */
int32_t _kf_ispare0; /* Space for more stuff. */
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1000000
target_cap_rights_t kf_cap_rights; /* Capability rights. */
uint64_t _kf_cap_spare; /* Space for future cap_rights_t. */
#else /* ! __FreeBSD_version >= 1000000 */
uint64_t kf_cap_rights;
int _kf_ispare[4];
#endif /* ! __FreeBSD_version >= 1000000 */
#else /* ! __FreeBSD_version >= 900000 */
int _kf_ispare[16];
#endif /* ! __FreeBSD_version >= 900000 */
/* Truncated before copyout in sysctl */
char kf_path[PATH_MAX]; /* Path to file, if any. */
};
@ -372,34 +296,19 @@ struct target_kinfo_vmentry {
uint64_t kve_start; /* Starting address. */
uint64_t kve_end; /* Finishing address. */
uint64_t kve_offset; /* Mapping offset in object */
#if defined(__FreeBSD_version) && __FreeBSD_version >= 900000
uint64_t kve_vn_fileid; /* inode number if vnode */
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031
uint32_t kve_vn_fsid_freebsd11; /* dev_t of vnode location */
#else
uint32_t kve_vn_fsid; /* dev_t of vnode location */
#endif
#else /* ! __FreeBSD_version >= 900000 */
uint64_t kve_fileid; /* inode number if vnode */
uint32_t kve_fsid; /* dev_t of vnode location */
#endif /* ! __FreeBSD_version >= 900000 */
int32_t kve_flags; /* Flags on map entry. */
int32_t kve_resident; /* Number of resident pages. */
int32_t kve_private_resident; /* Number of private pages. */
int32_t kve_protection; /* Protection bitmask. */
int32_t kve_ref_count; /* VM obj ref count. */
int32_t kve_shadow_count; /* VM obj shadow count. */
#if defined(__FreeBSD_version) && __FreeBSD_version >= 900000
int32_t kve_vn_type; /* Vnode type. */
uint64_t kve_vn_size; /* File size. */
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031
uint32_t kve_vn_rdev_freebsd11; /* Device id if device. */
#else
uint32_t kve_vn_rdev; /* Device id if device. */
#endif
uint16_t kve_vn_mode; /* File mode. */
uint16_t kve_status; /* Status flags. */
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031
#if (__FreeBSD_version >= 1300501 && __FreeBSD_version < 1400000) || \
__FreeBSD_version >= 1400009
union {
@ -413,13 +322,6 @@ struct target_kinfo_vmentry {
#endif
uint64_t kve_vn_rdev; /* Device id if device. */
int _kve_ispare[8]; /* Space for more stuff. */
#else
int32_t _kve_ispare[12]; /* Space for more stuff. */
#endif
#else /* ! __FreeBSD_version >= 900000 */
int _kve_pad0;
int32_t _kve_ispare[16]; /* Space for more stuff. */
#endif /* ! __FreeBSD_version >= 900000 */
/* Truncated before copyout in sysctl */
char kve_path[PATH_MAX]; /* Path to VM obj, if any. */
};

View File

@ -23,8 +23,6 @@
#define TARGET_DEFAULT_CPU_MODEL "qemu32"
#define TARGET_CPU_RESET(cpu)
static inline void target_cpu_init(CPUX86State *env,
struct target_pt_regs *regs)
{

View File

@ -27,8 +27,6 @@
#define TARGET_MINSIGSTKSZ (512 * 4) /* min sig stack size */
#define TARGET_SIGSTKSZ (MINSIGSTKSZ + 32768) /* recommended size */
#define TARGET_MC_GET_CLEAR_RET 0x0001
struct target_sigcontext {
/* to be added */
};

View File

@ -195,6 +195,15 @@ static void usage(void)
__thread CPUState *thread_cpu;
void stop_all_tasks(void)
{
/*
* We trust that, when using NPTL (pthreads), start_exclusive() handles
* thread stopping correctly.
*/
start_exclusive();
}
bool qemu_cpu_is_self(CPUState *cpu)
{
return thread_cpu == cpu;
@ -210,7 +219,6 @@ void init_task_state(TaskState *ts)
{
int i;
ts->used = 1;
ts->first_free = ts->sigqueue_table;
for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];

View File

@ -1,3 +1,7 @@
if not have_bsd_user
subdir_done()
endif
bsd_user_ss.add(files(
'bsdload.c',
'elfload.c',
@ -8,3 +12,6 @@ bsd_user_ss.add(files(
'syscall.c',
'uaccess.c',
))
# Pull in the OS-specific build glue, if any
subdir(targetos)

View File

@ -21,8 +21,6 @@
#include "qemu.h"
#include "qemu-common.h"
//#define DEBUG_MMAP
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
@ -67,14 +65,11 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
abi_ulong end, host_start, host_end, addr;
int prot1, ret;
#ifdef DEBUG_MMAP
printf("mprotect: start=0x" TARGET_ABI_FMT_lx
qemu_log_mask(CPU_LOG_PAGE, "mprotect: start=0x" TARGET_ABI_FMT_lx
" len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
prot & PROT_READ ? 'r' : '-',
prot & PROT_WRITE ? 'w' : '-',
prot & PROT_EXEC ? 'x' : '-');
#endif
if ((start & ~TARGET_PAGE_MASK) != 0)
return -EINVAL;
len = TARGET_PAGE_ALIGN(len);
@ -132,7 +127,27 @@ error:
return ret;
}
/* map an incomplete host page */
/*
* map an incomplete host page
*
* mmap_frag can be called with a valid fd if flags doesn't contain any of
* MAP_ANON, MAP_STACK, or MAP_GUARD. If we need to map a page in those cases,
* we pass fd == -1. However, if flags contains MAP_GUARD, then MAP_ANON
* cannot be added.
*
* * If fd is valid (not -1), we want to map the pages with MAP_ANON.
* * If flags contains MAP_GUARD, we don't want to add MAP_ANON because it
* will be rejected. See kern_mmap's enforcing of constraints for MAP_GUARD
* in sys/vm/vm_mmap.c.
* * If flags contains MAP_ANON, it doesn't matter whether we add it or not.
* * If flags contains MAP_STACK, mmap adds MAP_ANON itself when called, so it
* doesn't matter whether we add it or not either. See the enforcing of
* constraints for MAP_STACK in kern_mmap.
*
* Don't add MAP_ANON for the flags that use fd == -1 without naming those
* flags explicitly, on the assumption that future flags requiring fd == -1
* will also not require MAP_ANON.
*/
static int mmap_frag(abi_ulong real_start,
abi_ulong start, abi_ulong end,
int prot, int flags, int fd, abi_ulong offset)
@ -152,9 +167,9 @@ static int mmap_frag(abi_ulong real_start,
}
if (prot1 == 0) {
/* no page was there, so we allocate one */
/* no page was there, so we allocate one. See also above. */
void *p = mmap(host_start, qemu_host_page_size, prot,
flags | MAP_ANON, -1, 0);
flags | ((fd != -1) ? MAP_ANON : 0), -1, 0);
if (p == MAP_FAILED)
return -1;
prot1 = prot;
@ -162,7 +177,7 @@ static int mmap_frag(abi_ulong real_start,
prot1 &= PAGE_BITS;
prot_new = prot | prot1;
if (!(flags & MAP_ANON)) {
if (fd != -1) {
/* msync() won't work here, so we return an error if write is
possible while it is a shared mapping */
if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
@ -174,16 +189,20 @@ static int mmap_frag(abi_ulong real_start,
mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
/* read the corresponding file data */
pread(fd, g2h_untagged(start), end - start, offset);
if (pread(fd, g2h_untagged(start), end - start, offset) == -1) {
return -1;
}
/* put final protection */
if (prot_new != (prot1 | PROT_WRITE))
mprotect(host_start, qemu_host_page_size, prot_new);
} else {
/* just update the protection */
if (prot_new != prot1) {
mprotect(host_start, qemu_host_page_size, prot_new);
}
if (prot_new & PROT_WRITE) {
memset(g2h_untagged(start), 0, end - start);
}
}
return 0;
}
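A compact restatement of the fd/MAP_ANON rule from the comment above, as a standalone sketch (the flag values are illustrative stand-ins, hard-coded so the sketch builds anywhere):

#include <assert.h>

#define X_MAP_ANON  0x1000
#define X_MAP_GUARD 0x2000

/* Per the rule above: only force MAP_ANON when there was a real fd to hide. */
static int frag_flags(int flags, int fd)
{
    return flags | ((fd != -1) ? X_MAP_ANON : 0);
}

int main(void)
{
    /* File-backed fragment: MAP_ANON is added, and fd is passed as -1. */
    assert(frag_flags(0, 7) & X_MAP_ANON);
    /* MAP_GUARD mapping: MAP_ANON must not be added, and is not. */
    assert(!(frag_flags(X_MAP_GUARD, -1) & X_MAP_ANON));
    return 0;
}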
@ -281,14 +300,10 @@ static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
addr = start;
wrapped = repeat = 0;
prev = 0;
flags = MAP_ANONYMOUS | MAP_PRIVATE;
#ifdef MAP_ALIGNED
flags = MAP_ANON | MAP_PRIVATE;
if (alignment != 0) {
flags |= MAP_ALIGNED(alignment);
}
#else
/* XXX TODO */
#endif
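For readers unfamiliar with the FreeBSD extension used above, a hedged aside (the shift value is assumed from sys/sys/mman.h):

/* Sketch of the FreeBSD definition; MAP_ALIGNED(n) encodes a log2 alignment
 * request in the upper flag bits. */
#define MAP_ALIGNMENT_SHIFT 24
#define MAP_ALIGNED(n)      ((n) << MAP_ALIGNMENT_SHIFT)
/* e.g. MAP_ALIGNED(12) requests a 2^12 = 4 KiB aligned mapping. */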
for (;; prev = ptr) {
/*
@ -391,57 +406,48 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
mmap_lock();
#ifdef DEBUG_MMAP
{
printf("mmap: start=0x" TARGET_ABI_FMT_lx
if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
qemu_log("mmap: start=0x" TARGET_ABI_FMT_lx
" len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
start, len,
prot & PROT_READ ? 'r' : '-',
prot & PROT_WRITE ? 'w' : '-',
prot & PROT_EXEC ? 'x' : '-');
if (flags & MAP_ALIGNMENT_MASK) {
printf("MAP_ALIGNED(%u) ", (flags & MAP_ALIGNMENT_MASK)
>> MAP_ALIGNMENT_SHIFT);
qemu_log("MAP_ALIGNED(%u) ",
(flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
}
#if MAP_GUARD
if (flags & MAP_GUARD) {
printf("MAP_GUARD ");
qemu_log("MAP_GUARD ");
}
#endif
if (flags & MAP_FIXED) {
printf("MAP_FIXED ");
qemu_log("MAP_FIXED ");
}
if (flags & MAP_ANONYMOUS) {
printf("MAP_ANON ");
if (flags & MAP_ANON) {
qemu_log("MAP_ANON ");
}
#ifdef MAP_EXCL
if (flags & MAP_EXCL) {
printf("MAP_EXCL ");
qemu_log("MAP_EXCL ");
}
#endif
if (flags & MAP_PRIVATE) {
printf("MAP_PRIVATE ");
qemu_log("MAP_PRIVATE ");
}
if (flags & MAP_SHARED) {
printf("MAP_SHARED ");
qemu_log("MAP_SHARED ");
}
if (flags & MAP_NOCORE) {
printf("MAP_NOCORE ");
qemu_log("MAP_NOCORE ");
}
#ifdef MAP_STACK
if (flags & MAP_STACK) {
printf("MAP_STACK ");
qemu_log("MAP_STACK ");
}
#endif
printf("fd=%d offset=0x%llx\n", fd, offset);
qemu_log("fd=%d offset=0x%lx\n", fd, offset);
}
#endif
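Note: with the conversion from printf() under DEBUG_MMAP to qemu_log()/qemu_loglevel_mask() gated on CPU_LOG_PAGE, the trace now flows through the normal logging machinery; enabling it with the usual "-d page" command-line option is the expected usage, though that flag name is an inference from CPU_LOG_PAGE rather than something stated in the patch.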
if ((flags & MAP_ANONYMOUS) && fd != -1) {
if ((flags & MAP_ANON) && fd != -1) {
errno = EINVAL;
goto fail;
}
#ifdef MAP_STACK
if (flags & MAP_STACK) {
if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) !=
(PROT_READ | PROT_WRITE))) {
@ -449,8 +455,6 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
goto fail;
}
}
#endif /* MAP_STACK */
#ifdef MAP_GUARD
if ((flags & MAP_GUARD) && (prot != PROT_NONE || fd != -1 ||
offset != 0 || (flags & (MAP_SHARED | MAP_PRIVATE |
/* MAP_PREFAULT | */ /* MAP_PREFAULT not in mman.h */
@ -458,18 +462,24 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
errno = EINVAL;
goto fail;
}
#endif
if (offset & ~TARGET_PAGE_MASK) {
errno = EINVAL;
goto fail;
}
len = TARGET_PAGE_ALIGN(len);
if (len == 0) {
errno = EINVAL;
goto fail;
}
/* Check for overflows */
len = TARGET_PAGE_ALIGN(len);
if (len == 0) {
errno = ENOMEM;
goto fail;
}
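/*
 * Why the second len check reports ENOMEM: TARGET_PAGE_ALIGN rounds up
 * (typically ((len) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK) and can
 * wrap to zero on overflow. With a 32-bit abi_ulong and 4 KiB pages:
 *
 *     (abi_ulong)((0xfffff001u + 0xfffu) & ~0xfffu) == 0
 *
 * so a zero result from aligning a non-zero length means the request ran
 * off the end of the address space, not that it was empty.
 */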
real_start = start & qemu_host_page_mask;
host_offset = offset & qemu_host_page_mask;
@@ -536,7 +546,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
* qemu_real_host_page_size
*/
p = mmap(g2h_untagged(start), host_len, prot,
flags | MAP_FIXED | ((fd != -1) ? MAP_ANONYMOUS : 0), -1, 0);
flags | MAP_FIXED | ((fd != -1) ? MAP_ANON : 0), -1, 0);
if (p == MAP_FAILED)
goto fail;
/* update start so that it points to the file position at 'offset' */
@@ -564,18 +574,16 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
* It can fail only on 64-bit host with 32-bit target.
* On any other target/host combination, host mmap() handles this error correctly.
*/
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
if (!guest_range_valid_untagged(start, len)) {
errno = EINVAL;
goto fail;
}
#endif
/*
* worst case: we cannot map the file because the offset is not
* aligned, so we read it
*/
if (!(flags & MAP_ANON) &&
if (fd != -1 &&
(offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
/*
* msync() won't work here, so we return an error if write is
@@ -591,17 +599,22 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
-1, 0);
if (retaddr == -1)
goto fail;
pread(fd, g2h_untagged(start), len, offset);
if (pread(fd, g2h_untagged(start), len, offset) == -1) {
goto fail;
}
if (!(prot & PROT_WRITE)) {
ret = target_mprotect(start, len, prot);
if (ret != 0) {
start = ret;
goto the_end;
}
assert(ret == 0);
}
goto the_end;
}
/* Reject the mapping if any page within the range is mapped */
if ((flags & MAP_EXCL) && page_check_range(start, len, 0) < 0) {
errno = EINVAL;
goto fail;
}
/* handle the start of the mapping */
if (start > real_start) {
if (real_end == real_start + qemu_host_page_size) {
@@ -697,8 +710,7 @@ static void mmap_reserve(abi_ulong start, abi_ulong size)
}
if (real_start != real_end) {
mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE,
-1, 0);
MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
}
}
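mmap_reserve() above deliberately re-maps the freed range rather than leaving a hole, so the reserved guest address space cannot be reused by the host. A minimal sketch of the same reservation idiom, assuming a POSIX host that defines MAP_ANON (the wrapper name is illustrative):
#include <sys/mman.h>
/* Illustrative wrapper: keep [start, start+len) reserved but inaccessible.
 * MAP_FIXED atomically replaces any existing mapping in the range;
 * PROT_NONE makes every access fault. */
static void reserve_range(void *start, size_t len)
{
    mmap(start, len, PROT_NONE,
         MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
}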

View File

@@ -17,7 +17,6 @@
#ifndef QEMU_H
#define QEMU_H
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/units.h"
@@ -73,15 +72,15 @@ struct image_info {
#define MAX_SIGQUEUE_SIZE 1024
struct sigqueue {
struct sigqueue *next;
struct qemu_sigqueue {
struct qemu_sigqueue *next;
target_siginfo_t info;
};
struct emulated_sigtable {
int pending; /* true if signal is pending */
struct sigqueue *first;
/* in order to always have memory for the first signal, we put it here */
struct sigqueue info;
struct qemu_sigqueue *first;
struct qemu_sigqueue info; /* Put first signal info here */
};
/*
@@ -92,18 +91,18 @@ typedef struct TaskState {
struct TaskState *next;
struct bsd_binprm *bprm;
int used; /* non zero if used */
struct image_info *info;
struct emulated_sigtable sigtab[TARGET_NSIG];
struct sigqueue sigqueue_table[MAX_SIGQUEUE_SIZE]; /* siginfo queue */
struct sigqueue *first_free; /* first free siginfo queue entry */
struct qemu_sigqueue sigqueue_table[MAX_SIGQUEUE_SIZE]; /* siginfo queue */
struct qemu_sigqueue *first_free; /* first free siginfo queue entry */
int signal_pending; /* non zero if a signal may be pending */
uint8_t stack[];
} __attribute__((aligned(16))) TaskState;
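sigqueue_table and first_free form an intrusive free list: entries are chained through their next pointers and handed out without a heap allocator. A hedged sketch of the usual alloc/free pair over these fields (helper names are illustrative, not taken from this tree):
static struct qemu_sigqueue *sigqueue_alloc(TaskState *ts)
{
    struct qemu_sigqueue *q = ts->first_free;

    if (q) {
        ts->first_free = q->next;  /* pop the head of the free list */
    }
    return q;                      /* NULL once the table is exhausted */
}

static void sigqueue_free(TaskState *ts, struct qemu_sigqueue *q)
{
    q->next = ts->first_free;      /* push back onto the free list */
    ts->first_free = q;
}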
void init_task_state(TaskState *ts);
void stop_all_tasks(void);
extern const char *qemu_uname_release;
/*
@@ -209,6 +208,7 @@ void process_pending_signals(CPUArchState *cpu_env);
void signal_init(void);
long do_sigreturn(CPUArchState *env);
long do_rt_sigreturn(CPUArchState *env);
void queue_signal(CPUArchState *env, int sig, target_siginfo_t *info);
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp);
/* mmap.c */
@@ -235,6 +235,13 @@ extern unsigned long target_dflssiz;
extern unsigned long target_maxssiz;
extern unsigned long target_sgrowsiz;
/* syscall.c */
abi_long get_errno(abi_long ret);
bool is_error(abi_long ret);
/* os-sys.c */
abi_long do_freebsd_sysarch(void *cpu_env, abi_long arg1, abi_long arg2);
/* user access */
#define VERIFY_READ PAGE_READ

View File

@@ -16,10 +16,24 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu/osdep.h"
#include "qemu.h"
/*
* Stubbed out routines until we merge signal support from the bsd-user
* fork.
*/
/*
* Queue a signal so that it will be sent to the virtual CPU as soon as
* possible.
*/
void queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
{
qemu_log_mask(LOG_UNIMP, "No signal queueing, dropping signal %d\n", sig);
}
void signal_init(void)
{
}
@@ -27,3 +41,19 @@ void signal_init(void)
void process_pending_signals(CPUArchState *cpu_env)
{
}
void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
MMUAccessType access_type, bool maperr, uintptr_t ra)
{
qemu_log_mask(LOG_UNIMP, "No signal support for SIGSEGV\n");
/* unreachable */
abort();
}
void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
MMUAccessType access_type, uintptr_t ra)
{
qemu_log_mask(LOG_UNIMP, "No signal support for SIGBUS\n");
/* unreachable */
abort();
}

View File

@@ -33,18 +33,18 @@
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static inline abi_long get_errno(abi_long ret)
abi_long get_errno(abi_long ret)
{
if (ret == -1)
if (ret == -1) {
/* XXX need to translate host -> target errnos here */
return -(errno);
else
}
return ret;
}
#define target_to_host_bitmask(x, tbl) (x)
static inline int is_error(abi_long ret)
bool is_error(abi_long ret)
{
return (abi_ulong)ret >= (abi_ulong)(-4096);
}
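With get_errno() and is_error() now exported (their prototypes are added to qemu.h earlier in this diff), other translation units can wrap host syscalls in the usual way. A sketch of a typical call site, where the wrapped syscall (close) is only an example:
#include <unistd.h>
/* Illustrative call site: run a host syscall, fold errno into the return
 * value, and test it the way the syscall dispatcher does. */
static abi_long do_example_close(int fd)
{
    abi_long ret = get_errno(close(fd));   /* -errno on failure */

    if (is_error(ret)) {
        return ret;                        /* propagate the negative errno */
    }
    return 0;
}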
@@ -88,56 +88,6 @@ static abi_long do_obreak(abi_ulong new_brk)
return 0;
}
#if defined(TARGET_I386)
static abi_long do_freebsd_sysarch(CPUX86State *env, int op, abi_ulong parms)
{
abi_long ret = 0;
abi_ulong val;
int idx;
switch (op) {
#ifdef TARGET_ABI32
case TARGET_FREEBSD_I386_SET_GSBASE:
case TARGET_FREEBSD_I386_SET_FSBASE:
if (op == TARGET_FREEBSD_I386_SET_GSBASE)
#else
case TARGET_FREEBSD_AMD64_SET_GSBASE:
case TARGET_FREEBSD_AMD64_SET_FSBASE:
if (op == TARGET_FREEBSD_AMD64_SET_GSBASE)
#endif
idx = R_GS;
else
idx = R_FS;
if (get_user(val, parms, abi_ulong))
return -TARGET_EFAULT;
cpu_x86_load_seg(env, idx, 0);
env->segs[idx].base = val;
break;
#ifdef TARGET_ABI32
case TARGET_FREEBSD_I386_GET_GSBASE:
case TARGET_FREEBSD_I386_GET_FSBASE:
if (op == TARGET_FREEBSD_I386_GET_GSBASE)
#else
case TARGET_FREEBSD_AMD64_GET_GSBASE:
case TARGET_FREEBSD_AMD64_GET_FSBASE:
if (op == TARGET_FREEBSD_AMD64_GET_GSBASE)
#endif
idx = R_GS;
else
idx = R_FS;
val = env->segs[idx].base;
if (put_user(val, parms, abi_ulong))
return -TARGET_EFAULT;
break;
/* XXX handle the others... */
default:
ret = -TARGET_EINVAL;
break;
}
return ret;
}
#endif
#ifdef __FreeBSD__
/*
* XXX this uses the undocumented oidfmt interface to find the kind of

View File

@@ -23,8 +23,6 @@
#define TARGET_DEFAULT_CPU_MODEL "qemu64"
#define TARGET_CPU_RESET(cpu)
static inline void target_cpu_init(CPUX86State *env,
struct target_pt_regs *regs)
{

View File

@@ -27,8 +27,6 @@
#define TARGET_MINSIGSTKSZ (512 * 4) /* min sig stack size */
#define TARGET_SIGSTKSZ (MINSIGSTKSZ + 32768) /* recommended size */
#define TARGET_MC_GET_CLEAR_RET 0x0001
struct target_sigcontext {
/* to be added */
};

Some files were not shown because too many files have changed in this diff