Merge conflicts

This commit is contained in:
Andrea Fioraldi 2022-10-13 10:26:39 +02:00
commit f53dddf7d9
1023 changed files with 33954 additions and 13016 deletions

View File

@ -10,9 +10,9 @@ windows_msys2_task:
memory: 8G memory: 8G
env: env:
CIRRUS_SHELL: powershell CIRRUS_SHELL: powershell
MSYS: winsymlinks:nativestrict MSYS: winsymlinks:native
MSYSTEM: MINGW64 MSYSTEM: MINGW64
MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2022-05-03/msys2-base-x86_64-20220503.sfx.exe MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe
MSYS2_FINGERPRINT: 0 MSYS2_FINGERPRINT: 0
MSYS2_PACKAGES: " MSYS2_PACKAGES: "
diffutils git grep make pkg-config sed diffutils git grep make pkg-config sed

1
.gitignore vendored
View File

@ -2,6 +2,7 @@
/qemu_libafl_bridge/target/ /qemu_libafl_bridge/target/
/GNUmakefile /GNUmakefile
/build/ /build/
/.cache/
*.pyc *.pyc
.sdk .sdk
.stgit-* .stgit-*

View File

@ -41,8 +41,7 @@ build-system-ubuntu:
job: amd64-ubuntu2004-container job: amd64-ubuntu2004-container
variables: variables:
IMAGE: ubuntu2004 IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-docs --enable-fdt=system --enable-slirp=system CONFIGURE_ARGS: --enable-docs --enable-fdt=system --enable-capstone
--enable-capstone
TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
microblazeel-softmmu mips64el-softmmu microblazeel-softmmu mips64el-softmmu
MAKE_CHECK_ARGS: check-build MAKE_CHECK_ARGS: check-build
@ -120,7 +119,7 @@ build-system-fedora:
variables: variables:
IMAGE: fedora IMAGE: fedora
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs
--enable-fdt=system --enable-slirp=system --enable-capstone --enable-fdt=system --enable-slirp --enable-capstone
TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu
xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
MAKE_CHECK_ARGS: check-build MAKE_CHECK_ARGS: check-build
@ -339,10 +338,8 @@ clang-user:
# On gitlab runners, default value sometimes end up calling 2 lds concurrently and # On gitlab runners, default value sometimes end up calling 2 lds concurrently and
# triggers an Out-Of-Memory error # triggers an Out-Of-Memory error
# #
# Since slirp callbacks are used in QEMU Timers, slirp needs to be compiled together # Since slirp callbacks are used in QEMU Timers, we cannot use libslirp with
# with QEMU and linked as a static library to avoid false positives in CFI checks. # CFI builds, and thus have to disable it here.
# This can be accomplished by using -enable-slirp=git, which avoids the use of
# a system-wide version of the library
# #
# Split in three sets of build/check/avocado to limit the execution time of each # Split in three sets of build/check/avocado to limit the execution time of each
# job # job
@ -355,7 +352,7 @@ build-cfi-aarch64:
AR: llvm-ar AR: llvm-ar
IMAGE: fedora IMAGE: fedora
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug
--enable-safe-stack --enable-slirp=git --enable-safe-stack --disable-slirp
TARGETS: aarch64-softmmu TARGETS: aarch64-softmmu
MAKE_CHECK_ARGS: check-build MAKE_CHECK_ARGS: check-build
# FIXME: This job is often failing, likely due to out-of-memory problems in # FIXME: This job is often failing, likely due to out-of-memory problems in
@ -395,7 +392,7 @@ build-cfi-ppc64-s390x:
AR: llvm-ar AR: llvm-ar
IMAGE: fedora IMAGE: fedora
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug
--enable-safe-stack --enable-slirp=git --enable-safe-stack --disable-slirp
TARGETS: ppc64-softmmu s390x-softmmu TARGETS: ppc64-softmmu s390x-softmmu
MAKE_CHECK_ARGS: check-build MAKE_CHECK_ARGS: check-build
# FIXME: This job is often failing, likely due to out-of-memory problems in # FIXME: This job is often failing, likely due to out-of-memory problems in
@ -435,7 +432,7 @@ build-cfi-x86_64:
AR: llvm-ar AR: llvm-ar
IMAGE: fedora IMAGE: fedora
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug
--enable-safe-stack --enable-slirp=git --enable-safe-stack --disable-slirp
TARGETS: x86_64-softmmu TARGETS: x86_64-softmmu
MAKE_CHECK_ARGS: check-build MAKE_CHECK_ARGS: check-build
timeout: 70m timeout: 70m
@ -469,7 +466,7 @@ tsan-build:
variables: variables:
IMAGE: ubuntu2004 IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-tsan --cc=clang-10 --cxx=clang++-10 CONFIGURE_ARGS: --enable-tsan --cc=clang-10 --cxx=clang++-10
--enable-trace-backends=ust --enable-fdt=system --enable-slirp=system --enable-trace-backends=ust --enable-fdt=system --disable-slirp
TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user
MAKE_CHECK_ARGS: bench V=1 MAKE_CHECK_ARGS: bench V=1
@ -571,7 +568,6 @@ build-without-default-features:
--disable-capstone --disable-capstone
--disable-pie --disable-pie
--disable-qom-cast-debug --disable-qom-cast-debug
--disable-slirp
--disable-strip --disable-strip
TARGETS: avr-softmmu i386-softmmu mips64-softmmu s390x-softmmu sh4-softmmu TARGETS: avr-softmmu i386-softmmu mips64-softmmu s390x-softmmu sh4-softmmu
sparc64-softmmu hexagon-linux-user i386-linux-user s390x-linux-user sparc64-softmmu hexagon-linux-user i386-linux-user s390x-linux-user

View File

@ -63,7 +63,7 @@ x64-freebsd-13-build:
NAME: freebsd-13 NAME: freebsd-13
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
CIRRUS_VM_IMAGE_SELECTOR: image_family CIRRUS_VM_IMAGE_SELECTOR: image_family
CIRRUS_VM_IMAGE_NAME: freebsd-13-0 CIRRUS_VM_IMAGE_NAME: freebsd-13-1
CIRRUS_VM_CPUS: 8 CIRRUS_VM_CPUS: 8
CIRRUS_VM_RAM: 8G CIRRUS_VM_RAM: 8G
UPDATE_COMMAND: pkg update UPDATE_COMMAND: pkg update

View File

@ -32,5 +32,6 @@ build_task:
- $MAKE -j$(sysctl -n hw.ncpu) - $MAKE -j$(sysctl -n hw.ncpu)
- for TARGET in $TEST_TARGETS ; - for TARGET in $TEST_TARGETS ;
do do
$MAKE -j$(sysctl -n hw.ncpu) $TARGET V=1 ; $MAKE -j$(sysctl -n hw.ncpu) $TARGET V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
done done

View File

@ -1,5 +1,4 @@
# THIS FILE WAS AUTO-GENERATED # THIS FILE WAS AUTO-GENERATED
# ... and then edited to fix py39, pending proper lcitool update.
# #
# $ lcitool variables freebsd-12 qemu # $ lcitool variables freebsd-12 qemu
# #
@ -12,6 +11,6 @@ MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja' NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='pkg' PACKAGING_COMMAND='pkg'
PIP3='/usr/local/bin/pip-3.8' PIP3='/usr/local/bin/pip-3.8'
PKGS='alsa-lib bash bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage ctags curl cyrus-sasl dbus diffutils dtc fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 libepoxy libffi libgcrypt libjpeg-turbo libnfs libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-virtualenv py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd' PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd'
PYPI_PKGS='' PYPI_PKGS=''
PYTHON='/usr/local/bin/python3' PYTHON='/usr/local/bin/python3'

View File

@ -1,5 +1,4 @@
# THIS FILE WAS AUTO-GENERATED # THIS FILE WAS AUTO-GENERATED
# ... and then edited to fix py39, pending proper lcitool update.
# #
# $ lcitool variables freebsd-13 qemu # $ lcitool variables freebsd-13 qemu
# #
@ -12,6 +11,6 @@ MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja' NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='pkg' PACKAGING_COMMAND='pkg'
PIP3='/usr/local/bin/pip-3.8' PIP3='/usr/local/bin/pip-3.8'
PKGS='alsa-lib bash bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage ctags curl cyrus-sasl dbus diffutils dtc fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 libepoxy libffi libgcrypt libjpeg-turbo libnfs libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-virtualenv py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd' PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd'
PYPI_PKGS='' PYPI_PKGS=''
PYTHON='/usr/local/bin/python3' PYTHON='/usr/local/bin/python3'

View File

@ -11,6 +11,6 @@ MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja' NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='brew' PACKAGING_COMMAND='brew'
PIP3='/usr/local/bin/pip3' PIP3='/usr/local/bin/pip3'
PKGS='bash bc bzip2 capstone ccache ctags curl dbus diffutils dtc gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson ncurses nettle ninja perl pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy sparse spice-protocol tesseract texinfo usbredir vde vte3 zlib zstd' PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson ncurses nettle ninja perl pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy sparse spice-protocol tesseract texinfo usbredir vde vte3 zlib zstd'
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme virtualenv' PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme'
PYTHON='/usr/local/bin/python3' PYTHON='/usr/local/bin/python3'

View File

@ -10,8 +10,3 @@ amd64-fedora-container:
extends: .container_job_template extends: .container_job_template
variables: variables:
NAME: fedora NAME: fedora
amd64-debian10-container:
extends: .container_job_template
variables:
NAME: debian10

View File

@ -1,21 +1,18 @@
alpha-debian-cross-container: alpha-debian-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers-layer2 stage: containers
needs: ['amd64-debian10-container']
variables: variables:
NAME: debian-alpha-cross NAME: debian-alpha-cross
amd64-debian-cross-container: amd64-debian-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers-layer2 stage: containers
needs: ['amd64-debian10-container']
variables: variables:
NAME: debian-amd64-cross NAME: debian-amd64-cross
amd64-debian-user-cross-container: amd64-debian-user-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers-layer2 stage: containers
needs: ['amd64-debian10-container']
variables: variables:
NAME: debian-all-test-cross NAME: debian-all-test-cross
@ -65,22 +62,19 @@ hexagon-cross-container:
hppa-debian-cross-container: hppa-debian-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers-layer2 stage: containers
needs: ['amd64-debian10-container']
variables: variables:
NAME: debian-hppa-cross NAME: debian-hppa-cross
m68k-debian-cross-container: m68k-debian-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers-layer2 stage: containers
needs: ['amd64-debian10-container']
variables: variables:
NAME: debian-m68k-cross NAME: debian-m68k-cross
mips64-debian-cross-container: mips64-debian-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers-layer2 stage: containers
needs: ['amd64-debian10-container']
variables: variables:
NAME: debian-mips64-cross NAME: debian-mips64-cross
@ -92,8 +86,7 @@ mips64el-debian-cross-container:
mips-debian-cross-container: mips-debian-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers-layer2 stage: containers
needs: ['amd64-debian10-container']
variables: variables:
NAME: debian-mips-cross NAME: debian-mips-cross
@ -105,8 +98,7 @@ mipsel-debian-cross-container:
powerpc-test-cross-container: powerpc-test-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers-layer2 stage: containers
needs: ['amd64-debian11-container']
variables: variables:
NAME: debian-powerpc-test-cross NAME: debian-powerpc-test-cross
@ -127,8 +119,7 @@ riscv64-debian-cross-container:
# we can however build TCG tests using a non-sid base # we can however build TCG tests using a non-sid base
riscv64-debian-test-cross-container: riscv64-debian-test-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers-layer2 stage: containers
needs: ['amd64-debian11-container']
variables: variables:
NAME: debian-riscv64-test-cross NAME: debian-riscv64-test-cross
@ -140,22 +131,19 @@ s390x-debian-cross-container:
sh4-debian-cross-container: sh4-debian-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers-layer2 stage: containers
needs: ['amd64-debian10-container']
variables: variables:
NAME: debian-sh4-cross NAME: debian-sh4-cross
sparc64-debian-cross-container: sparc64-debian-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers-layer2 stage: containers
needs: ['amd64-debian10-container']
variables: variables:
NAME: debian-sparc64-cross NAME: debian-sparc64-cross
tricore-debian-cross-container: tricore-debian-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers-layer2 stage: containers
needs: ['amd64-debian10-container']
variables: variables:
NAME: debian-tricore-cross NAME: debian-tricore-cross

View File

@ -7,11 +7,6 @@ amd64-alpine-container:
variables: variables:
NAME: alpine NAME: alpine
amd64-debian11-container:
extends: .container_job_template
variables:
NAME: debian11
amd64-debian-container: amd64-debian-container:
extends: .container_job_template extends: .container_job_template
stage: containers stage: containers

View File

@ -46,5 +46,8 @@
- cd build - cd build
- PKG_CONFIG_PATH=$PKG_CONFIG_PATH - PKG_CONFIG_PATH=$PKG_CONFIG_PATH
../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
--disable-system --disable-system --target-list-exclude="aarch64_be-linux-user
alpha-linux-user cris-linux-user m68k-linux-user microblazeel-linux-user
nios2-linux-user or1k-linux-user ppc-linux-user sparc-linux-user
xtensa-linux-user $CROSS_SKIP_TARGETS"
- make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS - make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS

View File

@ -70,20 +70,6 @@ cross-i386-tci:
EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user
MAKE_CHECK_ARGS: check check-tcg MAKE_CHECK_ARGS: check check-tcg
cross-mips-system:
extends: .cross_system_build_job
needs:
job: mips-debian-cross-container
variables:
IMAGE: debian-mips-cross
cross-mips-user:
extends: .cross_user_build_job
needs:
job: mips-debian-cross-container
variables:
IMAGE: debian-mips-cross
cross-mipsel-system: cross-mipsel-system:
extends: .cross_system_build_job extends: .cross_system_build_job
needs: needs:

View File

@ -15,6 +15,6 @@ variables:
include: include:
- local: '/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml' - local: '/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml'
- local: '/.gitlab-ci.d/custom-runners/ubuntu-20.04-aarch64.yml' - local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml'
- local: '/.gitlab-ci.d/custom-runners/ubuntu-20.04-aarch32.yml' - local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml'
- local: '/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml' - local: '/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml'

View File

@ -23,6 +23,8 @@ centos-stream-8-x86_64:
- mkdir build - mkdir build
- cd build - cd build
- ../scripts/ci/org.centos/stream/8/x86_64/configure - ../scripts/ci/org.centos/stream/8/x86_64/configure
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make -j"$JOBS" - make -j"$JOBS"
- make NINJA=":" check - make NINJA=":" check
|| { cat meson-logs/testlog.txt; exit 1; } ;
- ../scripts/ci/org.centos/stream/8/x86_64/test-avocado - ../scripts/ci/org.centos/stream/8/x86_64/test-avocado

View File

@ -17,9 +17,12 @@ ubuntu-20.04-s390x-all-linux-static:
- mkdir build - mkdir build
- cd build - cd build
- ../configure --enable-debug --static --disable-system --disable-glusterfs --disable-libssh - ../configure --enable-debug --static --disable-system --disable-glusterfs --disable-libssh
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc` - make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1 - make --output-sync -j`nproc` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
- make --output-sync -j`nproc` check-tcg V=1 - make --output-sync -j`nproc` check-tcg V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
ubuntu-20.04-s390x-all: ubuntu-20.04-s390x-all:
needs: [] needs: []
@ -35,8 +38,10 @@ ubuntu-20.04-s390x-all:
- mkdir build - mkdir build
- cd build - cd build
- ../configure --disable-libssh - ../configure --disable-libssh
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc` - make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1 - make --output-sync -j`nproc` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
ubuntu-20.04-s390x-alldbg: ubuntu-20.04-s390x-alldbg:
needs: [] needs: []
@ -55,9 +60,11 @@ ubuntu-20.04-s390x-alldbg:
- mkdir build - mkdir build
- cd build - cd build
- ../configure --enable-debug --disable-libssh - ../configure --enable-debug --disable-libssh
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make clean - make clean
- make --output-sync -j`nproc` - make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1 - make --output-sync -j`nproc` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
ubuntu-20.04-s390x-clang: ubuntu-20.04-s390x-clang:
needs: [] needs: []
@ -76,8 +83,10 @@ ubuntu-20.04-s390x-clang:
- mkdir build - mkdir build
- cd build - cd build
- ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-sanitizers - ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-sanitizers
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc` - make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1 - make --output-sync -j`nproc` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
ubuntu-20.04-s390x-tci: ubuntu-20.04-s390x-tci:
needs: [] needs: []
@ -96,6 +105,7 @@ ubuntu-20.04-s390x-tci:
- mkdir build - mkdir build
- cd build - cd build
- ../configure --disable-libssh --enable-tcg-interpreter - ../configure --disable-libssh --enable-tcg-interpreter
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc` - make --output-sync -j`nproc`
ubuntu-20.04-s390x-notcg: ubuntu-20.04-s390x-notcg:
@ -115,5 +125,7 @@ ubuntu-20.04-s390x-notcg:
- mkdir build - mkdir build
- cd build - cd build
- ../configure --disable-libssh --disable-tcg - ../configure --disable-libssh --disable-tcg
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc` - make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1 - make --output-sync -j`nproc` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;

View File

@ -1,12 +1,12 @@
# All ubuntu-20.04 jobs should run successfully in an environment # All ubuntu-22.04 jobs should run successfully in an environment
# setup by the scripts/ci/setup/qemu/build-environment.yml task # setup by the scripts/ci/setup/qemu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 20.04" # "Install basic packages to build QEMU on Ubuntu 20.04"
ubuntu-20.04-aarch32-all: ubuntu-22.04-aarch32-all:
needs: [] needs: []
stage: build stage: build
tags: tags:
- ubuntu_20.04 - ubuntu_22.04
- aarch32 - aarch32
rules: rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
@ -19,5 +19,7 @@ ubuntu-20.04-aarch32-all:
- mkdir build - mkdir build
- cd build - cd build
- ../configure --cross-prefix=arm-linux-gnueabihf- - ../configure --cross-prefix=arm-linux-gnueabihf-
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40` - make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check V=1 - make --output-sync -j`nproc --ignore=40` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;

View File

@ -2,30 +2,33 @@
# setup by the scripts/ci/setup/qemu/build-environment.yml task # setup by the scripts/ci/setup/qemu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 20.04" # "Install basic packages to build QEMU on Ubuntu 20.04"
ubuntu-20.04-aarch64-all-linux-static: ubuntu-22.04-aarch64-all-linux-static:
needs: [] needs: []
stage: build stage: build
tags: tags:
- ubuntu_20.04 - ubuntu_22.04
- aarch64 - aarch64
rules: rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$AARCH64_RUNNER_AVAILABLE" - if: "$AARCH64_RUNNER_AVAILABLE"
script: script:
# --disable-libssh is needed because of https://bugs.launchpad.net/qemu/+bug/1838763
# --disable-glusterfs is needed because there's no static version of those libs in distro supplied packages
- mkdir build - mkdir build
- cd build - cd build
- ../configure --enable-debug --static --disable-system --disable-glusterfs --disable-libssh # Disable -static-pie due to build error with system libc:
# https://bugs.launchpad.net/ubuntu/+source/glibc/+bug/1987438
- ../configure --enable-debug --static --disable-system --disable-pie
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40` - make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check V=1 - make --output-sync -j`nproc --ignore=40` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
- make --output-sync -j`nproc --ignore=40` check-tcg V=1 - make --output-sync -j`nproc --ignore=40` check-tcg V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
ubuntu-20.04-aarch64-all: ubuntu-22.04-aarch64-all:
needs: [] needs: []
stage: build stage: build
tags: tags:
- ubuntu_20.04 - ubuntu_22.04
- aarch64 - aarch64
rules: rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
@ -37,15 +40,17 @@ ubuntu-20.04-aarch64-all:
script: script:
- mkdir build - mkdir build
- cd build - cd build
- ../configure --disable-libssh - ../configure
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40` - make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check V=1 - make --output-sync -j`nproc --ignore=40` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
ubuntu-20.04-aarch64-alldbg: ubuntu-22.04-aarch64-alldbg:
needs: [] needs: []
stage: build stage: build
tags: tags:
- ubuntu_20.04 - ubuntu_22.04
- aarch64 - aarch64
rules: rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
@ -53,16 +58,18 @@ ubuntu-20.04-aarch64-alldbg:
script: script:
- mkdir build - mkdir build
- cd build - cd build
- ../configure --enable-debug --disable-libssh - ../configure --enable-debug
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make clean - make clean
- make --output-sync -j`nproc --ignore=40` - make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check V=1 - make --output-sync -j`nproc --ignore=40` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
ubuntu-20.04-aarch64-clang: ubuntu-22.04-aarch64-clang:
needs: [] needs: []
stage: build stage: build
tags: tags:
- ubuntu_20.04 - ubuntu_22.04
- aarch64 - aarch64
rules: rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
@ -75,14 +82,16 @@ ubuntu-20.04-aarch64-clang:
- mkdir build - mkdir build
- cd build - cd build
- ../configure --disable-libssh --cc=clang-10 --cxx=clang++-10 --enable-sanitizers - ../configure --disable-libssh --cc=clang-10 --cxx=clang++-10 --enable-sanitizers
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40` - make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check V=1 - make --output-sync -j`nproc --ignore=40` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
ubuntu-20.04-aarch64-tci: ubuntu-22.04-aarch64-tci:
needs: [] needs: []
stage: build stage: build
tags: tags:
- ubuntu_20.04 - ubuntu_22.04
- aarch64 - aarch64
rules: rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
@ -94,14 +103,15 @@ ubuntu-20.04-aarch64-tci:
script: script:
- mkdir build - mkdir build
- cd build - cd build
- ../configure --disable-libssh --enable-tcg-interpreter - ../configure --enable-tcg-interpreter
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40` - make --output-sync -j`nproc --ignore=40`
ubuntu-20.04-aarch64-notcg: ubuntu-22.04-aarch64-notcg:
needs: [] needs: []
stage: build stage: build
tags: tags:
- ubuntu_20.04 - ubuntu_22.04
- aarch64 - aarch64
rules: rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
@ -113,6 +123,8 @@ ubuntu-20.04-aarch64-notcg:
script: script:
- mkdir build - mkdir build
- cd build - cd build
- ../configure --disable-libssh --disable-tcg - ../configure --disable-tcg
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40` - make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check V=1 - make --output-sync -j`nproc --ignore=40` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;

View File

@ -3,6 +3,5 @@
# - test (for test stages, using build artefacts from a build stage) # - test (for test stages, using build artefacts from a build stage)
stages: stages:
- containers - containers
- containers-layer2
- build - build
- test - test

View File

@ -17,7 +17,7 @@
} }
- If ( !(Test-Path -Path msys64\var\cache\msys2.exe ) ) { - If ( !(Test-Path -Path msys64\var\cache\msys2.exe ) ) {
Invoke-WebRequest Invoke-WebRequest
"https://github.com/msys2/msys2-installer/releases/download/2022-05-03/msys2-base-x86_64-20220503.sfx.exe" "https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe"
-outfile "msys64\var\cache\msys2.exe" -outfile "msys64\var\cache\msys2.exe"
} }
- msys64\var\cache\msys2.exe -y - msys64\var\cache\msys2.exe -y
@ -33,7 +33,8 @@ msys2-64bit:
extends: .shared_msys2_builder extends: .shared_msys2_builder
script: script:
- .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed - .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
diffutils git grep make sed bison diffutils flex
git grep make sed
mingw-w64-x86_64-capstone mingw-w64-x86_64-capstone
mingw-w64-x86_64-curl mingw-w64-x86_64-curl
mingw-w64-x86_64-cyrus-sasl mingw-w64-x86_64-cyrus-sasl
@ -57,17 +58,18 @@ msys2-64bit:
mingw-w64-x86_64-zstd " mingw-w64-x86_64-zstd "
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory - $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
- $env:MSYSTEM = 'MINGW64' # Start a 64 bit Mingw environment - $env:MSYSTEM = 'MINGW64' # Start a 64 bit Mingw environment
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
- .\msys64\usr\bin\bash -lc './configure --target-list=x86_64-softmmu - .\msys64\usr\bin\bash -lc './configure --target-list=x86_64-softmmu
--enable-capstone --without-default-devices' --enable-capstone --without-default-devices'
- .\msys64\usr\bin\bash -lc "sed -i '/^ROMS=/d' build/config-host.mak" - .\msys64\usr\bin\bash -lc 'make'
- .\msys64\usr\bin\bash -lc 'make -j2' - .\msys64\usr\bin\bash -lc 'make check || { cat build/meson-logs/testlog.txt; exit 1; } ;'
- .\msys64\usr\bin\bash -lc 'make check'
msys2-32bit: msys2-32bit:
extends: .shared_msys2_builder extends: .shared_msys2_builder
script: script:
- .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed - .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
diffutils git grep make sed bison diffutils flex
git grep make sed
mingw-w64-i686-capstone mingw-w64-i686-capstone
mingw-w64-i686-curl mingw-w64-i686-curl
mingw-w64-i686-cyrus-sasl mingw-w64-i686-cyrus-sasl
@ -89,8 +91,9 @@ msys2-32bit:
mingw-w64-i686-usbredir " mingw-w64-i686-usbredir "
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory - $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
- $env:MSYSTEM = 'MINGW32' # Start a 32-bit MinG environment - $env:MSYSTEM = 'MINGW32' # Start a 32-bit MinG environment
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
- mkdir output - mkdir output
- cd output - cd output
- ..\msys64\usr\bin\bash -lc "../configure --target-list=ppc64-softmmu" - ..\msys64\usr\bin\bash -lc "../configure --target-list=ppc64-softmmu"
- ..\msys64\usr\bin\bash -lc 'make -j2' - ..\msys64\usr\bin\bash -lc 'make'
- ..\msys64\usr\bin\bash -lc 'make check' - ..\msys64\usr\bin\bash -lc 'make check || { cat meson-logs/testlog.txt; exit 1; } ;'

3
.gitmodules vendored
View File

@ -46,9 +46,6 @@
[submodule "roms/edk2"] [submodule "roms/edk2"]
path = roms/edk2 path = roms/edk2
url = https://gitlab.com/qemu-project/edk2.git url = https://gitlab.com/qemu-project/edk2.git
[submodule "slirp"]
path = slirp
url = https://gitlab.com/qemu-project/libslirp.git
[submodule "roms/opensbi"] [submodule "roms/opensbi"]
path = roms/opensbi path = roms/opensbi
url = https://gitlab.com/qemu-project/opensbi.git url = https://gitlab.com/qemu-project/opensbi.git

View File

@ -65,6 +65,7 @@ James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org> Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org>
Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com> Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com>
Radoslaw Biernacki <rad@semihalf.com> <radoslaw.biernacki@linaro.org> Radoslaw Biernacki <rad@semihalf.com> <radoslaw.biernacki@linaro.org>
Paul Brook <paul@nowt.org> <paul@codesourcery.com>
Paul Burton <paulburton@kernel.org> <paul.burton@mips.com> Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
Paul Burton <paulburton@kernel.org> <paul.burton@imgtec.com> Paul Burton <paulburton@kernel.org> <paul.burton@imgtec.com>
Paul Burton <paulburton@kernel.org> <paul@archlinuxmips.org> Paul Burton <paulburton@kernel.org> <paul@archlinuxmips.org>

View File

@ -1129,7 +1129,7 @@ Virt
M: Xiaojuan Yang <yangxiaojuan@loongson.cn> M: Xiaojuan Yang <yangxiaojuan@loongson.cn>
M: Song Gao <gaosong@loongson.cn> M: Song Gao <gaosong@loongson.cn>
S: Maintained S: Maintained
F: docs/system/loongarch/loongson3.rst F: docs/system/loongarch/virt.rst
F: configs/targets/loongarch64-softmmu.mak F: configs/targets/loongarch64-softmmu.mak
F: configs/devices/loongarch64-softmmu/default.mak F: configs/devices/loongarch64-softmmu/default.mak
F: hw/loongarch/ F: hw/loongarch/
@ -1282,7 +1282,7 @@ F: hw/openrisc/openrisc_sim.c
PowerPC Machines PowerPC Machines
---------------- ----------------
405 (ref405ep and taihu) 405 (ref405ep)
L: qemu-ppc@nongnu.org L: qemu-ppc@nongnu.org
S: Orphan S: Orphan
F: hw/ppc/ppc405_boards.c F: hw/ppc/ppc405_boards.c
@ -2030,8 +2030,10 @@ virtio-blk
M: Stefan Hajnoczi <stefanha@redhat.com> M: Stefan Hajnoczi <stefanha@redhat.com>
L: qemu-block@nongnu.org L: qemu-block@nongnu.org
S: Supported S: Supported
F: hw/block/virtio-blk-common.c
F: hw/block/virtio-blk.c F: hw/block/virtio-blk.c
F: hw/block/dataplane/* F: hw/block/dataplane/*
F: include/hw/virtio/virtio-blk-common.h
F: tests/qtest/virtio-blk-test.c F: tests/qtest/virtio-blk-test.c
T: git https://github.com/stefanha/qemu.git block T: git https://github.com/stefanha/qemu.git block
@ -2098,6 +2100,14 @@ F: hw/virtio/vhost-user-rng-pci.c
F: include/hw/virtio/vhost-user-rng.h F: include/hw/virtio/vhost-user-rng.h
F: tools/vhost-user-rng/* F: tools/vhost-user-rng/*
vhost-user-gpio
M: Alex Bennée <alex.bennee@linaro.org>
R: Viresh Kumar <viresh.kumar@linaro.org>
S: Maintained
F: hw/virtio/vhost-user-gpio*
F: include/hw/virtio/vhost-user-gpio.h
F: tests/qtest/libqos/virtio-gpio.*
virtio-crypto virtio-crypto
M: Gonglei <arei.gonglei@huawei.com> M: Gonglei <arei.gonglei@huawei.com>
S: Supported S: Supported
@ -2271,11 +2281,13 @@ S: Maintained
F: contrib/vhost-user-blk/ F: contrib/vhost-user-blk/
F: contrib/vhost-user-scsi/ F: contrib/vhost-user-scsi/
F: hw/block/vhost-user-blk.c F: hw/block/vhost-user-blk.c
F: hw/block/virtio-blk-common.c
F: hw/scsi/vhost-user-scsi.c F: hw/scsi/vhost-user-scsi.c
F: hw/virtio/vhost-user-blk-pci.c F: hw/virtio/vhost-user-blk-pci.c
F: hw/virtio/vhost-user-scsi-pci.c F: hw/virtio/vhost-user-scsi-pci.c
F: include/hw/virtio/vhost-user-blk.h F: include/hw/virtio/vhost-user-blk.h
F: include/hw/virtio/vhost-user-scsi.h F: include/hw/virtio/vhost-user-scsi.h
F: include/hw/virtio/virtio-blk-common.h
vhost-user-gpu vhost-user-gpu
M: Marc-André Lureau <marcandre.lureau@redhat.com> M: Marc-André Lureau <marcandre.lureau@redhat.com>
@ -2438,6 +2450,7 @@ X: audio/jackaudio.c
X: audio/ossaudio.c X: audio/ossaudio.c
X: audio/paaudio.c X: audio/paaudio.c
X: audio/sdlaudio.c X: audio/sdlaudio.c
X: audio/sndioaudio.c
X: audio/spiceaudio.c X: audio/spiceaudio.c
F: qapi/audio.json F: qapi/audio.json
@ -2482,6 +2495,12 @@ R: Thomas Huth <huth@tuxfamily.org>
S: Odd Fixes S: Odd Fixes
F: audio/sdlaudio.c F: audio/sdlaudio.c
Sndio Audio backend
M: Gerd Hoffmann <kraxel@redhat.com>
R: Alexandre Ratchov <alex@caoua.org>
S: Odd Fixes
F: audio/sndioaudio.c
Block layer core Block layer core
M: Kevin Wolf <kwolf@redhat.com> M: Kevin Wolf <kwolf@redhat.com>
M: Hanna Reitz <hreitz@redhat.com> M: Hanna Reitz <hreitz@redhat.com>
@ -2670,7 +2689,7 @@ GDB stub
M: Alex Bennée <alex.bennee@linaro.org> M: Alex Bennée <alex.bennee@linaro.org>
R: Philippe Mathieu-Daudé <f4bug@amsat.org> R: Philippe Mathieu-Daudé <f4bug@amsat.org>
S: Maintained S: Maintained
F: gdbstub* F: gdbstub/*
F: include/exec/gdbstub.h F: include/exec/gdbstub.h
F: gdb-xml/ F: gdb-xml/
F: tests/tcg/multiarch/gdbstub/ F: tests/tcg/multiarch/gdbstub/
@ -2978,7 +2997,6 @@ F: include/hw/registerfields.h
SLIRP SLIRP
M: Samuel Thibault <samuel.thibault@ens-lyon.org> M: Samuel Thibault <samuel.thibault@ens-lyon.org>
S: Maintained S: Maintained
F: slirp/
F: net/slirp.c F: net/slirp.c
F: include/net/slirp.h F: include/net/slirp.h
T: git https://people.debian.org/~sthibault/qemu.git slirp T: git https://people.debian.org/~sthibault/qemu.git slirp
@ -3724,7 +3742,8 @@ GitLab custom runner (Works On Arm Sponsored)
M: Alex Bennée <alex.bennee@linaro.org> M: Alex Bennée <alex.bennee@linaro.org>
M: Philippe Mathieu-Daudé <f4bug@amsat.org> M: Philippe Mathieu-Daudé <f4bug@amsat.org>
S: Maintained S: Maintained
F: .gitlab-ci.d/custom-runners/ubuntu-20.04-aarch64.yml F: .gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml
F: .gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml
Documentation Documentation
------------- -------------

View File

@ -42,6 +42,9 @@ configure: ;
ifneq ($(wildcard config-host.mak),) ifneq ($(wildcard config-host.mak),)
include config-host.mak include config-host.mak
include Makefile.prereqs
Makefile.prereqs: config-host.mak
git-submodule-update: git-submodule-update:
.git-submodule-status: git-submodule-update config-host.mak .git-submodule-status: git-submodule-update config-host.mak
Makefile: .git-submodule-status Makefile: .git-submodule-status
@ -145,7 +148,7 @@ NINJAFLAGS = $(if $V,-v) $(if $(MAKE.n), -n) $(if $(MAKE.k), -k0) \
$(filter-out -j, $(lastword -j1 $(filter -l% -j%, $(MAKEFLAGS)))) \ $(filter-out -j, $(lastword -j1 $(filter -l% -j%, $(MAKEFLAGS)))) \
-d keepdepfile -d keepdepfile
ninja-cmd-goals = $(or $(MAKECMDGOALS), all) ninja-cmd-goals = $(or $(MAKECMDGOALS), all)
ninja-cmd-goals += $(foreach g, $(MAKECMDGOALS), $(.ninja-goals.$g)))) ninja-cmd-goals += $(foreach g, $(MAKECMDGOALS), $(.ninja-goals.$g))
makefile-targets := build.ninja ctags TAGS cscope dist clean uninstall makefile-targets := build.ninja ctags TAGS cscope dist clean uninstall
# "ninja -t targets" also lists all prerequisites. If build system # "ninja -t targets" also lists all prerequisites. If build system
@ -186,7 +189,7 @@ include $(SRC_PATH)/tests/Makefile.include
all: recurse-all all: recurse-all
ROMS_RULES=$(foreach t, all clean, $(addsuffix /$(t), $(ROMS))) ROMS_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(ROMS)))
.PHONY: $(ROMS_RULES) .PHONY: $(ROMS_RULES)
$(ROMS_RULES): $(ROMS_RULES):
$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" TARGET_DIR="$(dir $@)" $(notdir $@),) $(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" TARGET_DIR="$(dir $@)" $(notdir $@),)
@ -194,6 +197,7 @@ $(ROMS_RULES):
.PHONY: recurse-all recurse-clean .PHONY: recurse-all recurse-clean
recurse-all: $(addsuffix /all, $(ROMS)) recurse-all: $(addsuffix /all, $(ROMS))
recurse-clean: $(addsuffix /clean, $(ROMS)) recurse-clean: $(addsuffix /clean, $(ROMS))
recurse-distclean: $(addsuffix /distclean, $(ROMS))
###################################################################### ######################################################################
@ -214,10 +218,10 @@ dist: qemu-$(VERSION).tar.bz2
qemu-%.tar.bz2: qemu-%.tar.bz2:
$(SRC_PATH)/scripts/make-release "$(SRC_PATH)" "$(patsubst qemu-%.tar.bz2,%,$@)" $(SRC_PATH)/scripts/make-release "$(SRC_PATH)" "$(patsubst qemu-%.tar.bz2,%,$@)"
distclean: clean distclean: clean recurse-distclean
-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean -g || : -$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean -g || :
rm -f config-host.mak qemu-bundle rm -f config-host.mak Makefile.prereqs qemu-bundle
rm -f tests/tcg/config-*.mak rm -f tests/tcg/*/config-target.mak tests/tcg/config-host.mak
rm -f config.status rm -f config.status
rm -f roms/seabios/config.mak rm -f roms/seabios/config.mak
rm -f qemu-plugins-ld.symbols qemu-plugins-ld64.symbols rm -f qemu-plugins-ld.symbols qemu-plugins-ld64.symbols

View File

@ -39,7 +39,7 @@ Documentation can be found hosted online at
current development version that is available at current development version that is available at
`<https://www.qemu.org/docs/master/>`_ is generated from the ``docs/`` `<https://www.qemu.org/docs/master/>`_ is generated from the ``docs/``
folder in the source tree, and is built by `Sphinx folder in the source tree, and is built by `Sphinx
<https://www.sphinx-doc.org/en/master/>_`. <https://www.sphinx-doc.org/en/master/>`_.
Building Building
@ -78,7 +78,7 @@ format-patch' and/or 'git send-email' to format & send the mail to the
qemu-devel@nongnu.org mailing list. All patches submitted must contain qemu-devel@nongnu.org mailing list. All patches submitted must contain
a 'Signed-off-by' line from the author. Patches should follow the a 'Signed-off-by' line from the author. Patches should follow the
guidelines set out in the `style section guidelines set out in the `style section
<https://www.qemu.org/docs/master/devel/style.html>` of <https://www.qemu.org/docs/master/devel/style.html>`_ of
the Developers Guide. the Developers Guide.
Additional information on submitting patches can be found online via Additional information on submitting patches can be found online via

View File

@ -1 +1 @@
7.0.50 7.1.50

View File

@ -129,6 +129,16 @@ bool accel_cpu_realizefn(CPUState *cpu, Error **errp)
return true; return true;
} }
int accel_supported_gdbstub_sstep_flags(void)
{
AccelState *accel = current_accel();
AccelClass *acc = ACCEL_GET_CLASS(accel);
if (acc->gdbstub_supported_sstep_flags) {
return acc->gdbstub_supported_sstep_flags();
}
return 0;
}
static const TypeInfo accel_cpu_type = { static const TypeInfo accel_cpu_type = {
.name = TYPE_ACCEL_CPU, .name = TYPE_ACCEL_CPU,
.parent = TYPE_OBJECT, .parent = TYPE_OBJECT,

View File

@ -16,12 +16,14 @@
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu/error-report.h" #include "qemu/error-report.h"
#include "qemu/main-loop.h" #include "qemu/main-loop.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h" #include "sysemu/kvm_int.h"
#include "sysemu/runstate.h" #include "sysemu/runstate.h"
#include "sysemu/cpus.h" #include "sysemu/cpus.h"
#include "qemu/guest-random.h" #include "qemu/guest-random.h"
#include "qapi/error.h" #include "qapi/error.h"
#include <linux/kvm.h>
#include "kvm-cpus.h" #include "kvm-cpus.h"
static void *kvm_vcpu_thread_fn(void *arg) static void *kvm_vcpu_thread_fn(void *arg)
@ -95,6 +97,13 @@ static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
ops->synchronize_post_init = kvm_cpu_synchronize_post_init; ops->synchronize_post_init = kvm_cpu_synchronize_post_init;
ops->synchronize_state = kvm_cpu_synchronize_state; ops->synchronize_state = kvm_cpu_synchronize_state;
ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm; ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;
#ifdef KVM_CAP_SET_GUEST_DEBUG
ops->supports_guest_debug = kvm_supports_guest_debug;
ops->insert_breakpoint = kvm_insert_breakpoint;
ops->remove_breakpoint = kvm_remove_breakpoint;
ops->remove_all_breakpoints = kvm_remove_all_breakpoints;
#endif
} }
static const TypeInfo kvm_accel_ops_type = { static const TypeInfo kvm_accel_ops_type = {

View File

@ -175,7 +175,7 @@ bool kvm_direct_msi_allowed;
bool kvm_ioeventfd_any_length_allowed; bool kvm_ioeventfd_any_length_allowed;
bool kvm_msi_use_devid; bool kvm_msi_use_devid;
bool kvm_has_guest_debug; bool kvm_has_guest_debug;
int kvm_sstep_flags; static int kvm_sstep_flags;
static bool kvm_immediate_exit; static bool kvm_immediate_exit;
static hwaddr kvm_max_slot_size = ~0; static hwaddr kvm_max_slot_size = ~0;
@ -719,12 +719,32 @@ static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn) static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{ {
return gfn->flags == KVM_DIRTY_GFN_F_DIRTY; /*
* Read the flags before the value. Pairs with barrier in
* KVM's kvm_dirty_ring_push() function.
*/
return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
} }
static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn) static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{ {
gfn->flags = KVM_DIRTY_GFN_F_RESET; /*
* Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS
* sees the full content of the ring:
*
* CPU0 CPU1 CPU2
* ------------------------------------------------------------------------------
* fill gfn0
* store-rel flags for gfn0
* load-acq flags for gfn0
* store-rel RESET for gfn0
* ioctl(RESET_RINGS)
* load-acq flags for gfn0
* check if flags have RESET
*
* The synchronization goes from CPU2 to CPU0 to CPU1.
*/
qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
} }
/* /*
@ -2265,7 +2285,7 @@ static void kvm_irqchip_create(KVMState *s)
ret = kvm_arch_irqchip_create(s); ret = kvm_arch_irqchip_create(s);
if (ret == 0) { if (ret == 0) {
if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) { if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
perror("Split IRQ chip mode not supported."); error_report("Split IRQ chip mode not supported.");
exit(1); exit(1);
} else { } else {
ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP); ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
@ -3267,8 +3287,13 @@ int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
return data.err; return data.err;
} }
int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr, bool kvm_supports_guest_debug(void)
target_ulong len, int type) {
/* probed during kvm_init() */
return kvm_has_guest_debug;
}
int kvm_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
{ {
struct kvm_sw_breakpoint *bp; struct kvm_sw_breakpoint *bp;
int err; int err;
@ -3306,8 +3331,7 @@ int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
return 0; return 0;
} }
int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr, int kvm_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
target_ulong len, int type)
{ {
struct kvm_sw_breakpoint *bp; struct kvm_sw_breakpoint *bp;
int err; int err;
@ -3371,28 +3395,6 @@ void kvm_remove_all_breakpoints(CPUState *cpu)
} }
} }
#else /* !KVM_CAP_SET_GUEST_DEBUG */
int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
return -EINVAL;
}
int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
target_ulong len, int type)
{
return -EINVAL;
}
int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
target_ulong len, int type)
{
return -EINVAL;
}
void kvm_remove_all_breakpoints(CPUState *cpu)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */ #endif /* !KVM_CAP_SET_GUEST_DEBUG */
static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset) static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
@ -3692,6 +3694,17 @@ static void kvm_accel_instance_init(Object *obj)
s->kvm_dirty_ring_size = 0; s->kvm_dirty_ring_size = 0;
} }
/**
* kvm_gdbstub_sstep_flags():
*
* Returns: SSTEP_* flags that KVM supports for guest debug. The
* support is probed during kvm_init()
*/
static int kvm_gdbstub_sstep_flags(void)
{
return kvm_sstep_flags;
}
static void kvm_accel_class_init(ObjectClass *oc, void *data) static void kvm_accel_class_init(ObjectClass *oc, void *data)
{ {
AccelClass *ac = ACCEL_CLASS(oc); AccelClass *ac = ACCEL_CLASS(oc);
@ -3699,6 +3712,7 @@ static void kvm_accel_class_init(ObjectClass *oc, void *data)
ac->init_machine = kvm_init; ac->init_machine = kvm_init;
ac->has_memory = kvm_accel_has_memory; ac->has_memory = kvm_accel_has_memory;
ac->allowed = &kvm_allowed; ac->allowed = &kvm_allowed;
ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags;
object_class_property_add(oc, "kernel-irqchip", "on|off|split", object_class_property_add(oc, "kernel-irqchip", "on|off|split",
NULL, kvm_set_kernel_irqchip, NULL, kvm_set_kernel_irqchip,
@ -3888,7 +3902,7 @@ exit:
typedef struct StatsDescriptors { typedef struct StatsDescriptors {
const char *ident; /* cache key, currently the StatsTarget */ const char *ident; /* cache key, currently the StatsTarget */
struct kvm_stats_desc *kvm_stats_desc; struct kvm_stats_desc *kvm_stats_desc;
struct kvm_stats_header *kvm_stats_header; struct kvm_stats_header kvm_stats_header;
QTAILQ_ENTRY(StatsDescriptors) next; QTAILQ_ENTRY(StatsDescriptors) next;
} StatsDescriptors; } StatsDescriptors;
@ -3919,7 +3933,7 @@ static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd
descriptors = g_new0(StatsDescriptors, 1); descriptors = g_new0(StatsDescriptors, 1);
/* Read stats header */ /* Read stats header */
kvm_stats_header = g_malloc(sizeof(*kvm_stats_header)); kvm_stats_header = &descriptors->kvm_stats_header;
ret = read(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header)); ret = read(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header));
if (ret != sizeof(*kvm_stats_header)) { if (ret != sizeof(*kvm_stats_header)) {
error_setg(errp, "KVM stats: failed to read stats header: " error_setg(errp, "KVM stats: failed to read stats header: "
@ -3944,7 +3958,6 @@ static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd
g_free(kvm_stats_desc); g_free(kvm_stats_desc);
return NULL; return NULL;
} }
descriptors->kvm_stats_header = kvm_stats_header;
descriptors->kvm_stats_desc = kvm_stats_desc; descriptors->kvm_stats_desc = kvm_stats_desc;
descriptors->ident = ident; descriptors->ident = ident;
QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next); QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next);
@ -3969,7 +3982,7 @@ static void query_stats(StatsResultList **result, StatsTarget target,
return; return;
} }
kvm_stats_header = descriptors->kvm_stats_header; kvm_stats_header = &descriptors->kvm_stats_header;
kvm_stats_desc = descriptors->kvm_stats_desc; kvm_stats_desc = descriptors->kvm_stats_desc;
size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size; size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
@ -4034,7 +4047,7 @@ static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
return; return;
} }
kvm_stats_header = descriptors->kvm_stats_header; kvm_stats_header = &descriptors->kvm_stats_header;
kvm_stats_desc = descriptors->kvm_stats_desc; kvm_stats_desc = descriptors->kvm_stats_desc;
size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size; size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
@ -4131,7 +4144,9 @@ void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp); query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
close(stats_fd); close(stats_fd);
stats_args.result.schema = result; if (first_cpu) {
stats_args.errp = errp; stats_args.result.schema = result;
run_on_cpu(first_cpu, query_stats_schema_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args)); stats_args.errp = errp;
run_on_cpu(first_cpu, query_stats_schema_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
}
} }

View File

@ -18,5 +18,9 @@ void kvm_destroy_vcpu(CPUState *cpu);
void kvm_cpu_synchronize_post_reset(CPUState *cpu); void kvm_cpu_synchronize_post_reset(CPUState *cpu);
void kvm_cpu_synchronize_post_init(CPUState *cpu); void kvm_cpu_synchronize_post_init(CPUState *cpu);
void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu); void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu);
bool kvm_supports_guest_debug(void);
int kvm_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len);
int kvm_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len);
void kvm_remove_all_breakpoints(CPUState *cpu);
#endif /* KVM_CPUS_H */ #endif /* KVM_CPUS_H */

View File

@ -46,27 +46,6 @@ int kvm_has_many_ioeventfds(void)
return 0; return 0;
} }
int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
return -ENOSYS;
}
int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
target_ulong len, int type)
{
return -EINVAL;
}
int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
target_ulong len, int type)
{
return -EINVAL;
}
void kvm_remove_all_breakpoints(CPUState *cpu)
{
}
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr) int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{ {
return 1; return 1;

View File

@ -21,6 +21,10 @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{ {
} }
void tcg_flush_jmp_cache(CPUState *cpu)
{
}
int probe_access_flags(CPUArchState *env, target_ulong addr, int probe_access_flags(CPUArchState *env, target_ulong addr,
MMUAccessType access_type, int mmu_idx, MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr) bool nonfault, void **phost, uintptr_t retaddr)

View File

@ -42,6 +42,7 @@
#include "sysemu/replay.h" #include "sysemu/replay.h"
#include "sysemu/tcg.h" #include "sysemu/tcg.h"
#include "exec/helper-proto.h" #include "exec/helper-proto.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h" #include "tb-hash.h"
#include "tb-context.h" #include "tb-context.h"
#include "internal.h" #include "internal.h"
@ -170,22 +171,95 @@ uint32_t curr_cflags(CPUState *cpu)
return cflags; return cflags;
} }
struct tb_desc {
target_ulong pc;
target_ulong cs_base;
CPUArchState *env;
tb_page_addr_t page_addr0;
uint32_t flags;
uint32_t cflags;
uint32_t trace_vcpu_dstate;
};
static bool tb_lookup_cmp(const void *p, const void *d)
{
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
tb->page_addr[0] == desc->page_addr0 &&
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
tb_cflags(tb) == desc->cflags) {
/* check next page if needed */
if (tb->page_addr[1] == -1) {
return true;
} else {
tb_page_addr_t phys_page1;
target_ulong virt_page1;
/*
* We know that the first page matched, and an otherwise valid TB
* encountered an incomplete instruction at the end of that page,
* therefore we know that generating a new TB from the current PC
* must also require reading from the next page -- even if the
* second pages do not match, and therefore the resulting insn
* is different for the new TB. Therefore any exception raised
* here by the faulting lookup is not premature.
*/
virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
phys_page1 = get_page_addr_code(desc->env, virt_page1);
if (tb->page_addr[1] == phys_page1) {
return true;
}
}
}
return false;
}
static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
uint32_t cflags)
{
tb_page_addr_t phys_pc;
struct tb_desc desc;
uint32_t h;
desc.env = cpu->env_ptr;
desc.cs_base = cs_base;
desc.flags = flags;
desc.cflags = cflags;
desc.trace_vcpu_dstate = *cpu->trace_dstate;
desc.pc = pc;
phys_pc = get_page_addr_code(desc.env, pc);
if (phys_pc == -1) {
return NULL;
}
desc.page_addr0 = phys_pc;
h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : pc),
flags, cflags, *cpu->trace_dstate);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
/* Might cause an exception, so have a longjmp destination ready */ /* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base, target_ulong cs_base,
uint32_t flags, uint32_t cflags) uint32_t flags, uint32_t cflags)
{ {
TranslationBlock *tb; TranslationBlock *tb;
CPUJumpCache *jc;
uint32_t hash; uint32_t hash;
/* we should never be trying to look up an INVALID tb */ /* we should never be trying to look up an INVALID tb */
tcg_debug_assert(!(cflags & CF_INVALID)); tcg_debug_assert(!(cflags & CF_INVALID));
hash = tb_jmp_cache_hash_func(pc); hash = tb_jmp_cache_hash_func(pc);
tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]); jc = cpu->tb_jmp_cache;
tb = tb_jmp_cache_get_tb(jc, hash);
if (likely(tb && if (likely(tb &&
tb->pc == pc && tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
tb->cs_base == cs_base && tb->cs_base == cs_base &&
tb->flags == flags && tb->flags == flags &&
tb->trace_vcpu_dstate == *cpu->trace_dstate && tb->trace_vcpu_dstate == *cpu->trace_dstate &&
@ -196,16 +270,14 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
if (tb == NULL) { if (tb == NULL) {
return NULL; return NULL;
} }
qatomic_set(&cpu->tb_jmp_cache[hash], tb); tb_jmp_cache_set(jc, hash, tb, pc);
return tb; return tb;
} }
static inline void log_cpu_exec(target_ulong pc, CPUState *cpu, static void log_cpu_exec(target_ulong pc, CPUState *cpu,
const TranslationBlock *tb) const TranslationBlock *tb)
{ {
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) if (qemu_log_in_addr_range(pc)) {
&& qemu_log_in_addr_range(pc)) {
qemu_log_mask(CPU_LOG_EXEC, qemu_log_mask(CPU_LOG_EXEC,
"Trace %d: %p [" TARGET_FMT_lx "Trace %d: %p [" TARGET_FMT_lx
"/" TARGET_FMT_lx "/%08x/%08x] %s\n", "/" TARGET_FMT_lx "/%08x/%08x] %s\n",
@ -329,7 +401,9 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
return tcg_code_gen_epilogue; return tcg_code_gen_epilogue;
} }
log_cpu_exec(pc, cpu, tb); if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
log_cpu_exec(pc, cpu, tb);
}
return tb->tc.ptr; return tb->tc.ptr;
} }
@ -352,7 +426,9 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
TranslationBlock *last_tb; TranslationBlock *last_tb;
const void *tb_ptr = itb->tc.ptr; const void *tb_ptr = itb->tc.ptr;
log_cpu_exec(itb->pc, cpu, itb); if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
log_cpu_exec(log_pc(cpu, itb), cpu, itb);
}
qemu_thread_jit_execute(); qemu_thread_jit_execute();
ret = tcg_qemu_tb_exec(env, tb_ptr); ret = tcg_qemu_tb_exec(env, tb_ptr);
@ -376,16 +452,21 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
* of the start of the TB. * of the start of the TB.
*/ */
CPUClass *cc = CPU_GET_CLASS(cpu); CPUClass *cc = CPU_GET_CLASS(cpu);
qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
"Stopped execution of TB chain before %p ["
TARGET_FMT_lx "] %s\n",
last_tb->tc.ptr, last_tb->pc,
lookup_symbol(last_tb->pc));
if (cc->tcg_ops->synchronize_from_tb) { if (cc->tcg_ops->synchronize_from_tb) {
cc->tcg_ops->synchronize_from_tb(cpu, last_tb); cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
} else { } else {
assert(!TARGET_TB_PCREL);
assert(cc->set_pc); assert(cc->set_pc);
cc->set_pc(cpu, last_tb->pc); cc->set_pc(cpu, tb_pc(last_tb));
}
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
target_ulong pc = log_pc(cpu, last_tb);
if (qemu_log_in_addr_range(pc)) {
qemu_log("Stopped execution of TB chain before %p ["
TARGET_FMT_lx "] %s\n",
last_tb->tc.ptr, pc, lookup_symbol(pc));
}
} }
} }
@ -462,13 +543,11 @@ void cpu_exec_step_atomic(CPUState *cpu)
cpu_tb_exec(cpu, tb, &tb_exit); cpu_tb_exec(cpu, tb, &tb_exit);
cpu_exec_exit(cpu); cpu_exec_exit(cpu);
} else { } else {
/*
* The mmap_lock is dropped by tb_gen_code if it runs out of
* memory.
*/
#ifndef CONFIG_SOFTMMU #ifndef CONFIG_SOFTMMU
clear_helper_retaddr(); clear_helper_retaddr();
tcg_debug_assert(!have_mmap_lock()); if (have_mmap_lock()) {
mmap_unlock();
}
#endif #endif
if (qemu_mutex_iothread_locked()) { if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread(); qemu_mutex_unlock_iothread();
@ -487,67 +566,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
end_exclusive(); end_exclusive();
} }
struct tb_desc {
target_ulong pc;
target_ulong cs_base;
CPUArchState *env;
tb_page_addr_t phys_page1;
uint32_t flags;
uint32_t cflags;
uint32_t trace_vcpu_dstate;
};
static bool tb_lookup_cmp(const void *p, const void *d)
{
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
if (tb->pc == desc->pc &&
tb->page_addr[0] == desc->phys_page1 &&
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
tb_cflags(tb) == desc->cflags) {
/* check next page if needed */
if (tb->page_addr[1] == -1) {
return true;
} else {
tb_page_addr_t phys_page2;
target_ulong virt_page2;
virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
phys_page2 = get_page_addr_code(desc->env, virt_page2);
if (tb->page_addr[1] == phys_page2) {
return true;
}
}
}
return false;
}
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
uint32_t cflags)
{
tb_page_addr_t phys_pc;
struct tb_desc desc;
uint32_t h;
desc.env = cpu->env_ptr;
desc.cs_base = cs_base;
desc.flags = flags;
desc.cflags = cflags;
desc.trace_vcpu_dstate = *cpu->trace_dstate;
desc.pc = pc;
phys_pc = get_page_addr_code(desc.env, pc);
if (phys_pc == -1) {
return NULL;
}
desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr) void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{ {
if (TCG_TARGET_HAS_direct_jump) { if (TCG_TARGET_HAS_direct_jump) {
@ -590,11 +608,8 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
qemu_spin_unlock(&tb_next->jmp_lock); qemu_spin_unlock(&tb_next->jmp_lock);
qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc, qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
"Linking TBs %p [" TARGET_FMT_lx tb->tc.ptr, n, tb_next->tc.ptr);
"] index %d -> %p [" TARGET_FMT_lx "]\n",
tb->tc.ptr, tb->pc, n,
tb_next->tc.ptr, tb_next->pc);
return; return;
out_unlock_next: out_unlock_next:
@ -852,11 +867,12 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
} }
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
target_ulong pc,
TranslationBlock **last_tb, int *tb_exit) TranslationBlock **last_tb, int *tb_exit)
{ {
int32_t insns_left; int32_t insns_left;
trace_exec_tb(tb, tb->pc); trace_exec_tb(tb, pc);
tb = cpu_tb_exec(cpu, tb, tb_exit); tb = cpu_tb_exec(cpu, tb, tb_exit);
if (*tb_exit != TB_EXIT_REQUESTED) { if (*tb_exit != TB_EXIT_REQUESTED) {
*last_tb = tb; *last_tb = tb;
@ -964,7 +980,9 @@ int cpu_exec(CPUState *cpu)
#ifndef CONFIG_SOFTMMU #ifndef CONFIG_SOFTMMU
clear_helper_retaddr(); clear_helper_retaddr();
tcg_debug_assert(!have_mmap_lock()); if (have_mmap_lock()) {
mmap_unlock();
}
#endif #endif
if (qemu_mutex_iothread_locked()) { if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread(); qemu_mutex_unlock_iothread();
@ -1006,6 +1024,8 @@ int cpu_exec(CPUState *cpu)
tb = tb_lookup(cpu, pc, cs_base, flags, cflags); tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) { if (tb == NULL) {
uint32_t h;
mmap_lock(); mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
mmap_unlock(); mmap_unlock();
@ -1013,7 +1033,8 @@ int cpu_exec(CPUState *cpu)
* We add the TB in the virtual pc hash table * We add the TB in the virtual pc hash table
* for the fast lookup * for the fast lookup
*/ */
qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb); h = tb_jmp_cache_hash_func(pc);
tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
} }
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
@ -1052,7 +1073,7 @@ int cpu_exec(CPUState *cpu)
//// --- End LibAFL code --- //// --- End LibAFL code ---
} }
cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit); cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
/* Try to align the host and virtual clocks /* Try to align the host and virtual clocks
if the guest is in advance */ if the guest is in advance */

View File

@ -100,21 +100,14 @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr) static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{ {
unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr); int i, i0 = tb_jmp_cache_hash_page(page_addr);
CPUJumpCache *jc = cpu->tb_jmp_cache;
for (i = 0; i < TB_JMP_PAGE_SIZE; i++) { for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL); qatomic_set(&jc->array[i0 + i].tb, NULL);
} }
} }
static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
/* Discard jump cache entries for any tb which might potentially
overlap the flushed page. */
tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
tb_jmp_cache_clear_page(cpu, addr);
}
/** /**
* tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
* @desc: The CPUTLBDesc portion of the TLB * @desc: The CPUTLBDesc portion of the TLB
@ -200,13 +193,13 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
} }
g_free(fast->table); g_free(fast->table);
g_free(desc->iotlb); g_free(desc->fulltlb);
tlb_window_reset(desc, now, 0); tlb_window_reset(desc, now, 0);
/* desc->n_used_entries is cleared by the caller */ /* desc->n_used_entries is cleared by the caller */
fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
fast->table = g_try_new(CPUTLBEntry, new_size); fast->table = g_try_new(CPUTLBEntry, new_size);
desc->iotlb = g_try_new(CPUIOTLBEntry, new_size); desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
/* /*
* If the allocations fail, try smaller sizes. We just freed some * If the allocations fail, try smaller sizes. We just freed some
@ -215,7 +208,7 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
* allocations to fail though, so we progressively reduce the allocation * allocations to fail though, so we progressively reduce the allocation
* size, aborting if we cannot even allocate the smallest TLB we support. * size, aborting if we cannot even allocate the smallest TLB we support.
*/ */
while (fast->table == NULL || desc->iotlb == NULL) { while (fast->table == NULL || desc->fulltlb == NULL) {
if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) { if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
error_report("%s: %s", __func__, strerror(errno)); error_report("%s: %s", __func__, strerror(errno));
abort(); abort();
@ -224,9 +217,9 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
g_free(fast->table); g_free(fast->table);
g_free(desc->iotlb); g_free(desc->fulltlb);
fast->table = g_try_new(CPUTLBEntry, new_size); fast->table = g_try_new(CPUTLBEntry, new_size);
desc->iotlb = g_try_new(CPUIOTLBEntry, new_size); desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
} }
} }
@ -258,7 +251,7 @@ static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
desc->n_used_entries = 0; desc->n_used_entries = 0;
fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS; fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
fast->table = g_new(CPUTLBEntry, n_entries); fast->table = g_new(CPUTLBEntry, n_entries);
desc->iotlb = g_new(CPUIOTLBEntry, n_entries); desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
tlb_mmu_flush_locked(desc, fast); tlb_mmu_flush_locked(desc, fast);
} }
@ -299,7 +292,7 @@ void tlb_destroy(CPUState *cpu)
CPUTLBDescFast *fast = &env_tlb(env)->f[i]; CPUTLBDescFast *fast = &env_tlb(env)->f[i];
g_free(fast->table); g_free(fast->table);
g_free(desc->iotlb); g_free(desc->fulltlb);
} }
} }
@ -364,7 +357,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
qemu_spin_unlock(&env_tlb(env)->c.lock); qemu_spin_unlock(&env_tlb(env)->c.lock);
cpu_tb_jmp_cache_clear(cpu); tcg_flush_jmp_cache(cpu);
if (to_clean == ALL_MMUIDX_BITS) { if (to_clean == ALL_MMUIDX_BITS) {
qatomic_set(&env_tlb(env)->c.full_flush_count, qatomic_set(&env_tlb(env)->c.full_flush_count,
@ -541,7 +534,12 @@ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
} }
qemu_spin_unlock(&env_tlb(env)->c.lock); qemu_spin_unlock(&env_tlb(env)->c.lock);
tb_flush_jmp_cache(cpu, addr); /*
* Discard jump cache entries for any tb which might potentially
* overlap the flushed page, which includes the previous.
*/
tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
tb_jmp_cache_clear_page(cpu, addr);
} }
/** /**
@ -788,12 +786,18 @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
* longer to clear each entry individually than it will to clear it all. * longer to clear each entry individually than it will to clear it all.
*/ */
if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) { if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
cpu_tb_jmp_cache_clear(cpu); tcg_flush_jmp_cache(cpu);
return; return;
} }
for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) { /*
tb_flush_jmp_cache(cpu, d.addr + i); * Discard jump cache entries for any tb which might potentially
* overlap the flushed pages, which includes the previous.
*/
d.addr -= TARGET_PAGE_SIZE;
for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
tb_jmp_cache_clear_page(cpu, d.addr);
d.addr += TARGET_PAGE_SIZE;
} }
} }
@ -951,7 +955,8 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
can be detected */ can be detected */
void tlb_protect_code(ram_addr_t ram_addr) void tlb_protect_code(ram_addr_t ram_addr)
{ {
cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE, cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_CODE); DIRTY_MEMORY_CODE);
} }
@ -1095,16 +1100,16 @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
} }
/* Add a new TLB entry. At most one entry for a given virtual address /*
* Add a new TLB entry. At most one entry for a given virtual address
* is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
* supplied size is only used by tlb_flush_page. * supplied size is only used by tlb_flush_page.
* *
* Called from TCG-generated code, which is under an RCU read-side * Called from TCG-generated code, which is under an RCU read-side
* critical section. * critical section.
*/ */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, void tlb_set_page_full(CPUState *cpu, int mmu_idx,
hwaddr paddr, MemTxAttrs attrs, int prot, target_ulong vaddr, CPUTLBEntryFull *full)
int mmu_idx, target_ulong size)
{ {
CPUArchState *env = cpu->env_ptr; CPUArchState *env = cpu->env_ptr;
CPUTLB *tlb = env_tlb(env); CPUTLB *tlb = env_tlb(env);
@ -1117,35 +1122,36 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
CPUTLBEntry *te, tn; CPUTLBEntry *te, tn;
hwaddr iotlb, xlat, sz, paddr_page; hwaddr iotlb, xlat, sz, paddr_page;
target_ulong vaddr_page; target_ulong vaddr_page;
int asidx = cpu_asidx_from_attrs(cpu, attrs); int asidx, wp_flags, prot;
int wp_flags;
bool is_ram, is_romd; bool is_ram, is_romd;
assert_cpu_is_self(cpu); assert_cpu_is_self(cpu);
if (size <= TARGET_PAGE_SIZE) { if (full->lg_page_size <= TARGET_PAGE_BITS) {
sz = TARGET_PAGE_SIZE; sz = TARGET_PAGE_SIZE;
} else { } else {
tlb_add_large_page(env, mmu_idx, vaddr, size); sz = (hwaddr)1 << full->lg_page_size;
sz = size; tlb_add_large_page(env, mmu_idx, vaddr, sz);
} }
vaddr_page = vaddr & TARGET_PAGE_MASK; vaddr_page = vaddr & TARGET_PAGE_MASK;
paddr_page = paddr & TARGET_PAGE_MASK; paddr_page = full->phys_addr & TARGET_PAGE_MASK;
prot = full->prot;
asidx = cpu_asidx_from_attrs(cpu, full->attrs);
section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
&xlat, &sz, attrs, &prot); &xlat, &sz, full->attrs, &prot);
assert(sz >= TARGET_PAGE_SIZE); assert(sz >= TARGET_PAGE_SIZE);
tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
" prot=%x idx=%d\n", " prot=%x idx=%d\n",
vaddr, paddr, prot, mmu_idx); vaddr, full->phys_addr, prot, mmu_idx);
address = vaddr_page; address = vaddr_page;
if (size < TARGET_PAGE_SIZE) { if (full->lg_page_size < TARGET_PAGE_BITS) {
/* Repeat the MMU check and TLB fill on every access. */ /* Repeat the MMU check and TLB fill on every access. */
address |= TLB_INVALID_MASK; address |= TLB_INVALID_MASK;
} }
if (attrs.byte_swap) { if (full->attrs.byte_swap) {
address |= TLB_BSWAP; address |= TLB_BSWAP;
} }
@ -1219,7 +1225,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
/* Evict the old entry into the victim tlb. */ /* Evict the old entry into the victim tlb. */
copy_tlb_helper_locked(tv, te); copy_tlb_helper_locked(tv, te);
desc->viotlb[vidx] = desc->iotlb[index]; desc->vfulltlb[vidx] = desc->fulltlb[index];
tlb_n_used_entries_dec(env, mmu_idx); tlb_n_used_entries_dec(env, mmu_idx);
} }
@ -1236,8 +1242,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
* subtract here is that of the page base, and not the same as the * subtract here is that of the page base, and not the same as the
* vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
*/ */
desc->iotlb[index].addr = iotlb - vaddr_page; desc->fulltlb[index] = *full;
desc->iotlb[index].attrs = attrs; desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
desc->fulltlb[index].phys_addr = paddr_page;
desc->fulltlb[index].prot = prot;
/* Now calculate the new entry */ /* Now calculate the new entry */
tn.addend = addend - vaddr_page; tn.addend = addend - vaddr_page;
@ -1272,9 +1280,21 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
qemu_spin_unlock(&tlb->c.lock); qemu_spin_unlock(&tlb->c.lock);
} }
/* Add a new TLB entry, but without specifying the memory void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
* transaction attributes to be used. hwaddr paddr, MemTxAttrs attrs, int prot,
*/ int mmu_idx, target_ulong size)
{
CPUTLBEntryFull full = {
.phys_addr = paddr,
.attrs = attrs,
.prot = prot,
.lg_page_size = ctz64(size)
};
assert(is_power_of_2(size));
tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
}
void tlb_set_page(CPUState *cpu, target_ulong vaddr, void tlb_set_page(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, int prot, hwaddr paddr, int prot,
int mmu_idx, target_ulong size) int mmu_idx, target_ulong size)
@ -1283,18 +1303,6 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
prot, mmu_idx, size); prot, mmu_idx, size);
} }
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
ram_addr_t ram_addr;
ram_addr = qemu_ram_addr_from_host(ptr);
if (ram_addr == RAM_ADDR_INVALID) {
error_report("Bad ram pointer %p", ptr);
abort();
}
return ram_addr;
}
/* /*
* Note: tlb_fill() can trigger a resize of the TLB. This means that all of the * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
* caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
@ -1303,15 +1311,14 @@ static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
static void tlb_fill(CPUState *cpu, target_ulong addr, int size, static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{ {
CPUClass *cc = CPU_GET_CLASS(cpu);
bool ok; bool ok;
/* /*
* This is not a probe, so only valid return is success; failure * This is not a probe, so only valid return is success; failure
* should result in exception + longjmp to the cpu loop. * should result in exception + longjmp to the cpu loop.
*/ */
ok = cc->tcg_ops->tlb_fill(cpu, addr, size, ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
access_type, mmu_idx, false, retaddr); access_type, mmu_idx, false, retaddr);
assert(ok); assert(ok);
} }
@ -1319,9 +1326,8 @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
MMUAccessType access_type, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr) int mmu_idx, uintptr_t retaddr)
{ {
CPUClass *cc = CPU_GET_CLASS(cpu); cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
mmu_idx, retaddr);
cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
} }
static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr, static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
@ -1341,7 +1347,7 @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
} }
} }
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
int mmu_idx, target_ulong addr, uintptr_t retaddr, int mmu_idx, target_ulong addr, uintptr_t retaddr,
MMUAccessType access_type, MemOp op) MMUAccessType access_type, MemOp op)
{ {
@ -1353,9 +1359,9 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
bool locked = false; bool locked = false;
MemTxResult r; MemTxResult r;
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
mr = section->mr; mr = section->mr;
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
cpu->mem_io_pc = retaddr; cpu->mem_io_pc = retaddr;
if (!cpu->can_do_io) { if (!cpu->can_do_io) {
cpu_io_recompile(cpu, retaddr); cpu_io_recompile(cpu, retaddr);
@ -1365,14 +1371,14 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
qemu_mutex_lock_iothread(); qemu_mutex_lock_iothread();
locked = true; locked = true;
} }
r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs); r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
if (r != MEMTX_OK) { if (r != MEMTX_OK) {
hwaddr physaddr = mr_offset + hwaddr physaddr = mr_offset +
section->offset_within_address_space - section->offset_within_address_space -
section->offset_within_region; section->offset_within_region;
cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
mmu_idx, iotlbentry->attrs, r, retaddr); mmu_idx, full->attrs, r, retaddr);
} }
if (locked) { if (locked) {
qemu_mutex_unlock_iothread(); qemu_mutex_unlock_iothread();
@ -1382,22 +1388,21 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
} }
/* /*
* Save a potentially trashed IOTLB entry for later lookup by plugin. * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
* This is read by tlb_plugin_lookup if the iotlb entry doesn't match * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
* because of the side effect of io_writex changing memory layout. * because of the side effect of io_writex changing memory layout.
*/ */
static void save_iotlb_data(CPUState *cs, hwaddr addr, static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
MemoryRegionSection *section, hwaddr mr_offset) hwaddr mr_offset)
{ {
#ifdef CONFIG_PLUGIN #ifdef CONFIG_PLUGIN
SavedIOTLB *saved = &cs->saved_iotlb; SavedIOTLB *saved = &cs->saved_iotlb;
saved->addr = addr;
saved->section = section; saved->section = section;
saved->mr_offset = mr_offset; saved->mr_offset = mr_offset;
#endif #endif
} }
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
int mmu_idx, uint64_t val, target_ulong addr, int mmu_idx, uint64_t val, target_ulong addr,
uintptr_t retaddr, MemOp op) uintptr_t retaddr, MemOp op)
{ {
@ -1408,9 +1413,9 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
bool locked = false; bool locked = false;
MemTxResult r; MemTxResult r;
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
mr = section->mr; mr = section->mr;
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
if (!cpu->can_do_io) { if (!cpu->can_do_io) {
cpu_io_recompile(cpu, retaddr); cpu_io_recompile(cpu, retaddr);
} }
@ -1420,20 +1425,20 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
* The memory_region_dispatch may trigger a flush/resize * The memory_region_dispatch may trigger a flush/resize
* so for plugins we save the iotlb_data just in case. * so for plugins we save the iotlb_data just in case.
*/ */
save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset); save_iotlb_data(cpu, section, mr_offset);
if (!qemu_mutex_iothread_locked()) { if (!qemu_mutex_iothread_locked()) {
qemu_mutex_lock_iothread(); qemu_mutex_lock_iothread();
locked = true; locked = true;
} }
r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs); r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
if (r != MEMTX_OK) { if (r != MEMTX_OK) {
hwaddr physaddr = mr_offset + hwaddr physaddr = mr_offset +
section->offset_within_address_space - section->offset_within_address_space -
section->offset_within_region; section->offset_within_region;
cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, MMU_DATA_STORE, mmu_idx, full->attrs, r,
retaddr); retaddr);
} }
if (locked) { if (locked) {
@ -1480,9 +1485,10 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
copy_tlb_helper_locked(vtlb, &tmptlb); copy_tlb_helper_locked(vtlb, &tmptlb);
qemu_spin_unlock(&env_tlb(env)->c.lock); qemu_spin_unlock(&env_tlb(env)->c.lock);
CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index]; CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx]; CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
tmpio = *io; *io = *vio; *vio = tmpio; CPUTLBEntryFull tmpf;
tmpf = *f1; *f1 = *f2; *f2 = tmpf;
return true; return true;
} }
} }
@ -1494,65 +1500,10 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
(ADDR) & TARGET_PAGE_MASK) (ADDR) & TARGET_PAGE_MASK)
/*
* Return a ram_addr_t for the virtual address for execution.
*
* Return -1 if we can't translate and execute from an entire page
* of RAM. This will force us to execute by loading and translating
* one insn at a time, without caching.
*
* NOTE: This function will trigger an exception if the page is
* not executable.
*/
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
void **hostp)
{
uintptr_t mmu_idx = cpu_mmu_index(env, true);
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
void *p;
if (unlikely(!tlb_hit(entry->addr_code, addr))) {
if (!VICTIM_TLB_HIT(addr_code, addr)) {
tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
/*
* The MMU protection covers a smaller range than a target
* page, so we must redo the MMU check for every insn.
*/
return -1;
}
}
assert(tlb_hit(entry->addr_code, addr));
}
if (unlikely(entry->addr_code & TLB_MMIO)) {
/* The region is not backed by RAM. */
if (hostp) {
*hostp = NULL;
}
return -1;
}
p = (void *)((uintptr_t)addr + entry->addend);
if (hostp) {
*hostp = p;
}
return qemu_ram_addr_from_host_nofail(p);
}
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
return get_page_addr_code_hostp(env, addr, NULL);
}
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
CPUIOTLBEntry *iotlbentry, uintptr_t retaddr) CPUTLBEntryFull *full, uintptr_t retaddr)
{ {
ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr; ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
@ -1579,7 +1530,8 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
static int probe_access_internal(CPUArchState *env, target_ulong addr, static int probe_access_internal(CPUArchState *env, target_ulong addr,
int fault_size, MMUAccessType access_type, int fault_size, MMUAccessType access_type,
int mmu_idx, bool nonfault, int mmu_idx, bool nonfault,
void **phost, uintptr_t retaddr) void **phost, CPUTLBEntryFull **pfull,
uintptr_t retaddr)
{ {
uintptr_t index = tlb_index(env, mmu_idx, addr); uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
@ -1602,25 +1554,36 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
} }
tlb_addr = tlb_read_ofs(entry, elt_ofs); tlb_addr = tlb_read_ofs(entry, elt_ofs);
flags = TLB_FLAGS_MASK;
page_addr = addr & TARGET_PAGE_MASK; page_addr = addr & TARGET_PAGE_MASK;
if (!tlb_hit_page(tlb_addr, page_addr)) { if (!tlb_hit_page(tlb_addr, page_addr)) {
if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
CPUState *cs = env_cpu(env); CPUState *cs = env_cpu(env);
CPUClass *cc = CPU_GET_CLASS(cs);
if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
mmu_idx, nonfault, retaddr)) { mmu_idx, nonfault, retaddr)) {
/* Non-faulting page table read failed. */ /* Non-faulting page table read failed. */
*phost = NULL; *phost = NULL;
*pfull = NULL;
return TLB_INVALID_MASK; return TLB_INVALID_MASK;
} }
/* TLB resize via tlb_fill may have moved the entry. */ /* TLB resize via tlb_fill may have moved the entry. */
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr); entry = tlb_entry(env, mmu_idx, addr);
/*
* With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
* to force the next access through tlb_fill. We've just
* called tlb_fill, so we know that this entry *is* valid.
*/
flags &= ~TLB_INVALID_MASK;
} }
tlb_addr = tlb_read_ofs(entry, elt_ofs); tlb_addr = tlb_read_ofs(entry, elt_ofs);
} }
flags = tlb_addr & TLB_FLAGS_MASK; flags &= tlb_addr;
*pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
@ -1633,37 +1596,44 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
return flags; return flags;
} }
int probe_access_flags(CPUArchState *env, target_ulong addr, int probe_access_full(CPUArchState *env, target_ulong addr,
MMUAccessType access_type, int mmu_idx, MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr) bool nonfault, void **phost, CPUTLBEntryFull **pfull,
uintptr_t retaddr)
{ {
int flags; int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
nonfault, phost, pfull, retaddr);
flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
nonfault, phost, retaddr);
/* Handle clean RAM pages. */ /* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) { if (unlikely(flags & TLB_NOTDIRTY)) {
uintptr_t index = tlb_index(env, mmu_idx, addr); notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
flags &= ~TLB_NOTDIRTY; flags &= ~TLB_NOTDIRTY;
} }
return flags; return flags;
} }
int probe_access_flags(CPUArchState *env, target_ulong addr,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr)
{
CPUTLBEntryFull *full;
return probe_access_full(env, addr, access_type, mmu_idx,
nonfault, phost, &full, retaddr);
}
void *probe_access(CPUArchState *env, target_ulong addr, int size, void *probe_access(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{ {
CPUTLBEntryFull *full;
void *host; void *host;
int flags; int flags;
g_assert(-(addr | TARGET_PAGE_MASK) >= size); g_assert(-(addr | TARGET_PAGE_MASK) >= size);
flags = probe_access_internal(env, addr, size, access_type, mmu_idx, flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
false, &host, retaddr); false, &host, &full, retaddr);
/* Per the interface, size == 0 merely faults the access. */ /* Per the interface, size == 0 merely faults the access. */
if (size == 0) { if (size == 0) {
@ -1671,20 +1641,17 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
} }
if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
/* Handle watchpoints. */ /* Handle watchpoints. */
if (flags & TLB_WATCHPOINT) { if (flags & TLB_WATCHPOINT) {
int wp_access = (access_type == MMU_DATA_STORE int wp_access = (access_type == MMU_DATA_STORE
? BP_MEM_WRITE : BP_MEM_READ); ? BP_MEM_WRITE : BP_MEM_READ);
cpu_check_watchpoint(env_cpu(env), addr, size, cpu_check_watchpoint(env_cpu(env), addr, size,
iotlbentry->attrs, wp_access, retaddr); full->attrs, wp_access, retaddr);
} }
/* Handle clean RAM pages. */ /* Handle clean RAM pages. */
if (flags & TLB_NOTDIRTY) { if (flags & TLB_NOTDIRTY) {
notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); notdirty_write(env_cpu(env), addr, 1, full, retaddr);
} }
} }
@ -1694,16 +1661,44 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
MMUAccessType access_type, int mmu_idx) MMUAccessType access_type, int mmu_idx)
{ {
CPUTLBEntryFull *full;
void *host; void *host;
int flags; int flags;
flags = probe_access_internal(env, addr, 0, access_type, flags = probe_access_internal(env, addr, 0, access_type,
mmu_idx, true, &host, 0); mmu_idx, true, &host, &full, 0);
/* No combination of flags are expected by the caller. */ /* No combination of flags are expected by the caller. */
return flags ? NULL : host; return flags ? NULL : host;
} }
/*
* Return a ram_addr_t for the virtual address for execution.
*
* Return -1 if we can't translate and execute from an entire page
* of RAM. This will force us to execute by loading and translating
* one insn at a time, without caching.
*
* NOTE: This function will trigger an exception if the page is
* not executable.
*/
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
void **hostp)
{
CPUTLBEntryFull *full;
void *p;
(void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
cpu_mmu_index(env, true), false, &p, &full, 0);
if (p == NULL) {
return -1;
}
if (hostp) {
*hostp = p;
}
return qemu_ram_addr_from_host_nofail(p);
}
#ifdef CONFIG_PLUGIN #ifdef CONFIG_PLUGIN
/* /*
* Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
@ -1715,7 +1710,7 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
* should have just filled the TLB. The one corner case is io_writex * should have just filled the TLB. The one corner case is io_writex
* which can cause TLB flushes and potential resizing of the TLBs * which can cause TLB flushes and potential resizing of the TLBs
* losing the information we need. In those cases we need to recover * losing the information we need. In those cases we need to recover
* data from a copy of the iotlbentry. As long as this always occurs * data from a copy of the CPUTLBEntryFull. As long as this always occurs
* from the same thread (which a mem callback will be) this is safe. * from the same thread (which a mem callback will be) this is safe.
*/ */
@ -1730,11 +1725,12 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
if (likely(tlb_hit(tlb_addr, addr))) { if (likely(tlb_hit(tlb_addr, addr))) {
/* We must have an iotlb entry for MMIO */ /* We must have an iotlb entry for MMIO */
if (tlb_addr & TLB_MMIO) { if (tlb_addr & TLB_MMIO) {
CPUIOTLBEntry *iotlbentry; CPUTLBEntryFull *full;
iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
data->is_io = true; data->is_io = true;
data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); data->v.io.section =
data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; iotlb_to_section(cpu, full->xlat_section, full->attrs);
data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
} else { } else {
data->is_io = false; data->is_io = false;
data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend); data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
@ -1842,7 +1838,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
if (unlikely(tlb_addr & TLB_NOTDIRTY)) { if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
notdirty_write(env_cpu(env), addr, size, notdirty_write(env_cpu(env), addr, size,
&env_tlb(env)->d[mmu_idx].iotlb[index], retaddr); &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr);
} }
return hostaddr; return hostaddr;
@ -1950,7 +1946,7 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
/* Handle anything that isn't just a straight memory access. */ /* Handle anything that isn't just a straight memory access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
CPUIOTLBEntry *iotlbentry; CPUTLBEntryFull *full;
bool need_swap; bool need_swap;
/* For anything that is unaligned, recurse through full_load. */ /* For anything that is unaligned, recurse through full_load. */
@ -1958,20 +1954,20 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
goto do_unaligned_access; goto do_unaligned_access;
} }
iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
/* Handle watchpoints. */ /* Handle watchpoints. */
if (unlikely(tlb_addr & TLB_WATCHPOINT)) { if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
/* On watchpoint hit, this will longjmp out. */ /* On watchpoint hit, this will longjmp out. */
cpu_check_watchpoint(env_cpu(env), addr, size, cpu_check_watchpoint(env_cpu(env), addr, size,
iotlbentry->attrs, BP_MEM_READ, retaddr); full->attrs, BP_MEM_READ, retaddr);
} }
need_swap = size > 1 && (tlb_addr & TLB_BSWAP); need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
/* Handle I/O access. */ /* Handle I/O access. */
if (likely(tlb_addr & TLB_MMIO)) { if (likely(tlb_addr & TLB_MMIO)) {
return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, return io_readx(env, full, mmu_idx, addr, retaddr,
access_type, op ^ (need_swap * MO_BSWAP)); access_type, op ^ (need_swap * MO_BSWAP));
} }
@ -2286,12 +2282,12 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
*/ */
if (unlikely(tlb_addr & TLB_WATCHPOINT)) { if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
cpu_check_watchpoint(env_cpu(env), addr, size - size2, cpu_check_watchpoint(env_cpu(env), addr, size - size2,
env_tlb(env)->d[mmu_idx].iotlb[index].attrs, env_tlb(env)->d[mmu_idx].fulltlb[index].attrs,
BP_MEM_WRITE, retaddr); BP_MEM_WRITE, retaddr);
} }
if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) { if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
cpu_check_watchpoint(env_cpu(env), page2, size2, cpu_check_watchpoint(env_cpu(env), page2, size2,
env_tlb(env)->d[mmu_idx].iotlb[index2].attrs, env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs,
BP_MEM_WRITE, retaddr); BP_MEM_WRITE, retaddr);
} }
@ -2355,7 +2351,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
/* Handle anything that isn't just a straight memory access. */ /* Handle anything that isn't just a straight memory access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
CPUIOTLBEntry *iotlbentry; CPUTLBEntryFull *full;
bool need_swap; bool need_swap;
/* For anything that is unaligned, recurse through byte stores. */ /* For anything that is unaligned, recurse through byte stores. */
@ -2363,20 +2359,20 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
goto do_unaligned_access; goto do_unaligned_access;
} }
iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
/* Handle watchpoints. */ /* Handle watchpoints. */
if (unlikely(tlb_addr & TLB_WATCHPOINT)) { if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
/* On watchpoint hit, this will longjmp out. */ /* On watchpoint hit, this will longjmp out. */
cpu_check_watchpoint(env_cpu(env), addr, size, cpu_check_watchpoint(env_cpu(env), addr, size,
iotlbentry->attrs, BP_MEM_WRITE, retaddr); full->attrs, BP_MEM_WRITE, retaddr);
} }
need_swap = size > 1 && (tlb_addr & TLB_BSWAP); need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
/* Handle I/O access. */ /* Handle I/O access. */
if (tlb_addr & TLB_MMIO) { if (tlb_addr & TLB_MMIO) {
io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, io_writex(env, full, mmu_idx, val, addr, retaddr,
op ^ (need_swap * MO_BSWAP)); op ^ (need_swap * MO_BSWAP));
return; return;
} }
@ -2388,7 +2384,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
/* Handle clean RAM pages. */ /* Handle clean RAM pages. */
if (tlb_addr & TLB_NOTDIRTY) { if (tlb_addr & TLB_NOTDIRTY) {
notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr); notdirty_write(env_cpu(env), addr, size, full, retaddr);
} }
haddr = (void *)((uintptr_t)addr + entry->addend); haddr = (void *)((uintptr_t)addr + entry->addend);

View File

@ -18,4 +18,14 @@ G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
void page_init(void); void page_init(void);
void tb_htable_init(void); void tb_htable_init(void);
/* Return the current PC from CPU, which may be cached in TB. */
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
{
#if TARGET_TB_PCREL
return cpu->cc->get_pc(cpu);
#else
return tb_pc(tb);
#endif
}
#endif /* ACCEL_TCG_INTERNAL_H */ #endif /* ACCEL_TCG_INTERNAL_H */

View File

@ -852,7 +852,8 @@ static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb)
pr_ops(); pr_ops();
} }
bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_only) bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
bool mem_only)
{ {
bool ret = false; bool ret = false;
@ -870,9 +871,9 @@ bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_onl
ret = true; ret = true;
ptb->vaddr = tb->pc; ptb->vaddr = db->pc_first;
ptb->vaddr2 = -1; ptb->vaddr2 = -1;
get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1); ptb->haddr1 = db->host_addr[0];
ptb->haddr2 = NULL; ptb->haddr2 = NULL;
ptb->mem_only = mem_only; ptb->mem_only = mem_only;
@ -898,16 +899,15 @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
* Note that we skip this when haddr1 == NULL, e.g. when we're * Note that we skip this when haddr1 == NULL, e.g. when we're
* fetching instructions from a region not backed by RAM. * fetching instructions from a region not backed by RAM.
*/ */
if (likely(ptb->haddr1 != NULL && ptb->vaddr2 == -1) && if (ptb->haddr1 == NULL) {
unlikely((db->pc_next & TARGET_PAGE_MASK) != pinsn->haddr = NULL;
(db->pc_first & TARGET_PAGE_MASK))) { } else if (is_same_page(db, db->pc_next)) {
get_page_addr_code_hostp(cpu->env_ptr, db->pc_next,
&ptb->haddr2);
ptb->vaddr2 = db->pc_next;
}
if (likely(ptb->vaddr2 == -1)) {
pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr; pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
} else { } else {
if (ptb->vaddr2 == -1) {
ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
get_page_addr_code_hostp(cpu->env_ptr, ptb->vaddr2, &ptb->haddr2);
}
pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2; pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
} }
} }

View File

@ -23,6 +23,7 @@
#include "exec/cpu-defs.h" #include "exec/cpu-defs.h"
#include "exec/exec-all.h" #include "exec/exec-all.h"
#include "qemu/xxhash.h" #include "qemu/xxhash.h"
#include "tb-jmp-cache.h"
#ifdef CONFIG_SOFTMMU #ifdef CONFIG_SOFTMMU

65
accel/tcg/tb-jmp-cache.h Normal file
View File

@ -0,0 +1,65 @@
/*
* The per-CPU TranslationBlock jump cache.
*
* Copyright (c) 2003 Fabrice Bellard
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef ACCEL_TCG_TB_JMP_CACHE_H
#define ACCEL_TCG_TB_JMP_CACHE_H
#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
/*
* Accessed in parallel; all accesses to 'tb' must be atomic.
* For TARGET_TB_PCREL, accesses to 'pc' must be protected by
* a load_acquire/store_release to 'tb'.
*/
struct CPUJumpCache {
struct {
TranslationBlock *tb;
#if TARGET_TB_PCREL
target_ulong pc;
#endif
} array[TB_JMP_CACHE_SIZE];
};
static inline TranslationBlock *
tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash)
{
#if TARGET_TB_PCREL
/* Use acquire to ensure current load of pc from jc. */
return qatomic_load_acquire(&jc->array[hash].tb);
#else
/* Use rcu_read to ensure current load of pc from *tb. */
return qatomic_rcu_read(&jc->array[hash].tb);
#endif
}
static inline target_ulong
tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
{
#if TARGET_TB_PCREL
return jc->array[hash].pc;
#else
return tb_pc(tb);
#endif
}
static inline void
tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
TranslationBlock *tb, target_ulong pc)
{
#if TARGET_TB_PCREL
jc->array[hash].pc = pc;
/* Use store_release on tb to ensure pc is written first. */
qatomic_store_release(&jc->array[hash].tb, tb);
#else
/* Use the pc value already stored in tb->pc. */
qatomic_set(&jc->array[hash].tb, tb);
#endif
}
#endif /* ACCEL_TCG_TB_JMP_CACHE_H */

View File

@ -109,7 +109,13 @@ void icount_prepare_for_run(CPUState *cpu)
replay_mutex_lock(); replay_mutex_lock();
if (cpu->icount_budget == 0) { if (cpu->icount_budget == 0) {
/*
* We're called without the iothread lock, so must take it while
* we're calling timer handlers.
*/
qemu_mutex_lock_iothread();
icount_notify_aio_contexts(); icount_notify_aio_contexts();
qemu_mutex_unlock_iothread();
} }
} }

View File

@ -32,6 +32,8 @@
#include "qemu/main-loop.h" #include "qemu/main-loop.h"
#include "qemu/guest-random.h" #include "qemu/guest-random.h"
#include "exec/exec-all.h" #include "exec/exec-all.h"
#include "exec/hwaddr.h"
#include "exec/gdbstub.h"
#include "tcg-accel-ops.h" #include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h" #include "tcg-accel-ops-mttcg.h"
@ -91,6 +93,97 @@ void tcg_handle_interrupt(CPUState *cpu, int mask)
} }
} }
static bool tcg_supports_guest_debug(void)
{
return true;
}
/* Translate GDB watchpoint type to a flags value for cpu_watchpoint_* */
static inline int xlat_gdb_type(CPUState *cpu, int gdbtype)
{
static const int xlat[] = {
[GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
[GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
[GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
};
CPUClass *cc = CPU_GET_CLASS(cpu);
int cputype = xlat[gdbtype];
if (cc->gdb_stop_before_watchpoint) {
cputype |= BP_STOP_BEFORE_ACCESS;
}
return cputype;
}
static int tcg_insert_breakpoint(CPUState *cs, int type, hwaddr addr, hwaddr len)
{
CPUState *cpu;
int err = 0;
switch (type) {
case GDB_BREAKPOINT_SW:
case GDB_BREAKPOINT_HW:
CPU_FOREACH(cpu) {
err = cpu_breakpoint_insert(cpu, addr, BP_GDB, NULL);
if (err) {
break;
}
}
return err;
case GDB_WATCHPOINT_WRITE:
case GDB_WATCHPOINT_READ:
case GDB_WATCHPOINT_ACCESS:
CPU_FOREACH(cpu) {
err = cpu_watchpoint_insert(cpu, addr, len,
xlat_gdb_type(cpu, type), NULL);
if (err) {
break;
}
}
return err;
default:
return -ENOSYS;
}
}
static int tcg_remove_breakpoint(CPUState *cs, int type, hwaddr addr, hwaddr len)
{
CPUState *cpu;
int err = 0;
switch (type) {
case GDB_BREAKPOINT_SW:
case GDB_BREAKPOINT_HW:
CPU_FOREACH(cpu) {
err = cpu_breakpoint_remove(cpu, addr, BP_GDB);
if (err) {
break;
}
}
return err;
case GDB_WATCHPOINT_WRITE:
case GDB_WATCHPOINT_READ:
case GDB_WATCHPOINT_ACCESS:
CPU_FOREACH(cpu) {
err = cpu_watchpoint_remove(cpu, addr, len,
xlat_gdb_type(cpu, type));
if (err) {
break;
}
}
return err;
default:
return -ENOSYS;
}
}
static inline void tcg_remove_all_breakpoints(CPUState *cpu)
{
cpu_breakpoint_remove_all(cpu, BP_GDB);
cpu_watchpoint_remove_all(cpu, BP_GDB);
}
static void tcg_accel_ops_init(AccelOpsClass *ops) static void tcg_accel_ops_init(AccelOpsClass *ops)
{ {
if (qemu_tcg_mttcg_enabled()) { if (qemu_tcg_mttcg_enabled()) {
@ -109,6 +202,11 @@ static void tcg_accel_ops_init(AccelOpsClass *ops)
ops->handle_interrupt = tcg_handle_interrupt; ops->handle_interrupt = tcg_handle_interrupt;
} }
} }
ops->supports_guest_debug = tcg_supports_guest_debug;
ops->insert_breakpoint = tcg_insert_breakpoint;
ops->remove_breakpoint = tcg_remove_breakpoint;
ops->remove_all_breakpoints = tcg_remove_all_breakpoints;
} }
static void tcg_accel_ops_class_init(ObjectClass *oc, void *data) static void tcg_accel_ops_class_init(ObjectClass *oc, void *data)

View File

@ -25,6 +25,7 @@
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "sysemu/tcg.h" #include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "sysemu/cpu-timers.h" #include "sysemu/cpu-timers.h"
#include "tcg/tcg.h" #include "tcg/tcg.h"
#include "qapi/error.h" #include "qapi/error.h"
@ -215,12 +216,28 @@ static void tcg_set_splitwx(Object *obj, bool value, Error **errp)
s->splitwx_enabled = value; s->splitwx_enabled = value;
} }
static int tcg_gdbstub_supported_sstep_flags(void)
{
/*
* In replay mode all events will come from the log and can't be
* suppressed otherwise we would break determinism. However as those
* events are tied to the number of executed instructions we won't see
* them occurring every time we single step.
*/
if (replay_mode != REPLAY_MODE_NONE) {
return SSTEP_ENABLE;
} else {
return SSTEP_ENABLE | SSTEP_NOIRQ | SSTEP_NOTIMER;
}
}
static void tcg_accel_class_init(ObjectClass *oc, void *data) static void tcg_accel_class_init(ObjectClass *oc, void *data)
{ {
AccelClass *ac = ACCEL_CLASS(oc); AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "tcg"; ac->name = "tcg";
ac->init_machine = tcg_init_machine; ac->init_machine = tcg_init_machine;
ac->allowed = &tcg_allowed; ac->allowed = &tcg_allowed;
ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags;
object_class_property_add_str(oc, "thread", object_class_property_add_str(oc, "thread",
tcg_get_thread, tcg_get_thread,

View File

@ -46,6 +46,7 @@
#include "exec/cputlb.h" #include "exec/cputlb.h"
#include "exec/translate-all.h" #include "exec/translate-all.h"
#include "exec/translator.h"
#include "qemu/bitmap.h" #include "qemu/bitmap.h"
#include "qemu/qemu-print.h" #include "qemu/qemu-print.h"
#include "qemu/timer.h" #include "qemu/timer.h"
@ -57,6 +58,7 @@
#include "sysemu/tcg.h" #include "sysemu/tcg.h"
#include "qapi/error.h" #include "qapi/error.h"
#include "hw/core/tcg-cpu-ops.h" #include "hw/core/tcg-cpu-ops.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h" #include "tb-hash.h"
#include "tb-context.h" #include "tb-context.h"
#include "internal.h" #include "internal.h"
@ -730,21 +732,14 @@ void libafl_add_backdoor_hook(void (*exec)(target_ulong id, uint64_t data),
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock()) #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif #endif
#define SMC_BITMAP_USE_THRESHOLD 10
typedef struct PageDesc { typedef struct PageDesc {
/* list of TBs intersecting this ram page */ /* list of TBs intersecting this ram page */
uintptr_t first_tb; uintptr_t first_tb;
#ifdef CONFIG_SOFTMMU #ifdef CONFIG_USER_ONLY
/* in order to optimize self modifying code, we count the number
of lookups we do to a given page to use a bitmap */
unsigned long *code_bitmap;
unsigned int code_write_count;
#else
unsigned long flags; unsigned long flags;
void *target_data; void *target_data;
#endif #endif
#ifndef CONFIG_USER_ONLY #ifdef CONFIG_SOFTMMU
QemuSpin lock; QemuSpin lock;
#endif #endif
} PageDesc; } PageDesc;
@ -933,7 +928,7 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
if (i == 0) { if (i == 0) {
prev = (j == 0 ? tb->pc : 0); prev = (!TARGET_TB_PCREL && j == 0 ? tb_pc(tb) : 0);
} else { } else {
prev = tcg_ctx->gen_insn_data[i - 1][j]; prev = tcg_ctx->gen_insn_data[i - 1][j];
} }
@ -961,7 +956,7 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t searched_pc, bool reset_icount) uintptr_t searched_pc, bool reset_icount)
{ {
target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc }; target_ulong data[TARGET_INSN_START_WORDS];
uintptr_t host_pc = (uintptr_t)tb->tc.ptr; uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
CPUArchState *env = cpu->env_ptr; CPUArchState *env = cpu->env_ptr;
const uint8_t *p = tb->tc.ptr + tb->tc.size; const uint8_t *p = tb->tc.ptr + tb->tc.size;
@ -977,6 +972,11 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
return -1; return -1;
} }
memset(data, 0, sizeof(data));
if (!TARGET_TB_PCREL) {
data[0] = tb_pc(tb);
}
/* Reconstruct the stored insn data while looking for the point at /* Reconstruct the stored insn data while looking for the point at
which the end of the insn exceeds the searched_pc. */ which the end of the insn exceeds the searched_pc. */
for (i = 0; i < num_insns; ++i) { for (i = 0; i < num_insns; ++i) {
@ -1100,7 +1100,7 @@ void page_init(void)
#endif #endif
} }
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
{ {
PageDesc *pd; PageDesc *pd;
void **lp; void **lp;
@ -1168,11 +1168,11 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
static inline PageDesc *page_find(tb_page_addr_t index) static inline PageDesc *page_find(tb_page_addr_t index)
{ {
return page_find_alloc(index, 0); return page_find_alloc(index, false);
} }
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
PageDesc **ret_p2, tb_page_addr_t phys2, int alloc); PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc);
/* In user-mode page locks aren't used; mmap_lock is enough */ /* In user-mode page locks aren't used; mmap_lock is enough */
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
@ -1286,7 +1286,7 @@ static inline void page_unlock(PageDesc *pd)
/* lock the page(s) of a TB in the correct acquisition order */ /* lock the page(s) of a TB in the correct acquisition order */
static inline void page_lock_tb(const TranslationBlock *tb) static inline void page_lock_tb(const TranslationBlock *tb)
{ {
page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0); page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], false);
} }
static inline void page_unlock_tb(const TranslationBlock *tb) static inline void page_unlock_tb(const TranslationBlock *tb)
@ -1475,7 +1475,7 @@ void page_collection_unlock(struct page_collection *set)
#endif /* !CONFIG_USER_ONLY */ #endif /* !CONFIG_USER_ONLY */
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
PageDesc **ret_p2, tb_page_addr_t phys2, int alloc) PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
{ {
PageDesc *p1, *p2; PageDesc *p1, *p2;
tb_page_addr_t page1; tb_page_addr_t page1;
@ -1519,13 +1519,13 @@ static bool tb_cmp(const void *ap, const void *bp)
const TranslationBlock *a = ap; const TranslationBlock *a = ap;
const TranslationBlock *b = bp; const TranslationBlock *b = bp;
return a->pc == b->pc && return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) &&
a->cs_base == b->cs_base && a->cs_base == b->cs_base &&
a->flags == b->flags && a->flags == b->flags &&
(tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) && (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
a->trace_vcpu_dstate == b->trace_vcpu_dstate && a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
a->page_addr[0] == b->page_addr[0] && a->page_addr[0] == b->page_addr[0] &&
a->page_addr[1] == b->page_addr[1]; a->page_addr[1] == b->page_addr[1]);
} }
void tb_htable_init(void) void tb_htable_init(void)
@ -1535,17 +1535,6 @@ void tb_htable_init(void)
qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode); qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
} }
/* call with @p->lock held */
static inline void invalidate_page_bitmap(PageDesc *p)
{
assert_page_locked(p);
#ifdef CONFIG_SOFTMMU
g_free(p->code_bitmap);
p->code_bitmap = NULL;
p->code_write_count = 0;
#endif
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */ /* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp) static void page_flush_tb_1(int level, void **lp)
{ {
@ -1560,7 +1549,6 @@ static void page_flush_tb_1(int level, void **lp)
for (i = 0; i < V_L2_SIZE; ++i) { for (i = 0; i < V_L2_SIZE; ++i) {
page_lock(&pd[i]); page_lock(&pd[i]);
pd[i].first_tb = (uintptr_t)NULL; pd[i].first_tb = (uintptr_t)NULL;
invalidate_page_bitmap(pd + i);
page_unlock(&pd[i]); page_unlock(&pd[i]);
} }
} else { } else {
@ -1614,7 +1602,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
} }
CPU_FOREACH(cpu) { CPU_FOREACH(cpu) {
cpu_tb_jmp_cache_clear(cpu); tcg_flush_jmp_cache(cpu);
} }
qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE); qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
@ -1659,9 +1647,10 @@ static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
TranslationBlock *tb = p; TranslationBlock *tb = p;
target_ulong addr = *(target_ulong *)userp; target_ulong addr = *(target_ulong *)userp;
if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) { if (!(addr + TARGET_PAGE_SIZE <= tb_pc(tb) ||
addr >= tb_pc(tb) + tb->size)) {
printf("ERROR invalidate: address=" TARGET_FMT_lx printf("ERROR invalidate: address=" TARGET_FMT_lx
" PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size); " PC=%08lx size=%04x\n", addr, (long)tb_pc(tb), tb->size);
} }
} }
@ -1680,11 +1669,11 @@ static void do_tb_page_check(void *p, uint32_t hash, void *userp)
TranslationBlock *tb = p; TranslationBlock *tb = p;
int flags1, flags2; int flags1, flags2;
flags1 = page_get_flags(tb->pc); flags1 = page_get_flags(tb_pc(tb));
flags2 = page_get_flags(tb->pc + tb->size - 1); flags2 = page_get_flags(tb_pc(tb) + tb->size - 1);
if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
(long)tb->pc, tb->size, flags1, flags2); (long)tb_pc(tb), tb->size, flags1, flags2);
} }
} }
@ -1793,6 +1782,28 @@ static inline void tb_jmp_unlink(TranslationBlock *dest)
qemu_spin_unlock(&dest->jmp_lock); qemu_spin_unlock(&dest->jmp_lock);
} }
static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
{
CPUState *cpu;
if (TARGET_TB_PCREL) {
/* A TB may be at any virtual address */
CPU_FOREACH(cpu) {
tcg_flush_jmp_cache(cpu);
}
} else {
uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb));
CPU_FOREACH(cpu) {
CPUJumpCache *jc = cpu->tb_jmp_cache;
if (qatomic_read(&jc->array[h].tb) == tb) {
qatomic_set(&jc->array[h].tb, NULL);
}
}
}
}
/* /*
* In user-mode, call with mmap_lock held. * In user-mode, call with mmap_lock held.
* In !user-mode, if @rm_from_page_list is set, call with the TB's pages' * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
@ -1800,7 +1811,6 @@ static inline void tb_jmp_unlink(TranslationBlock *dest)
*/ */
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{ {
CPUState *cpu;
PageDesc *p; PageDesc *p;
uint32_t h; uint32_t h;
tb_page_addr_t phys_pc; tb_page_addr_t phys_pc;
@ -1814,9 +1824,9 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
qemu_spin_unlock(&tb->jmp_lock); qemu_spin_unlock(&tb->jmp_lock);
/* remove the TB from the hash list */ /* remove the TB from the hash list */
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); phys_pc = tb->page_addr[0];
h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags, h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
tb->trace_vcpu_dstate); tb->flags, orig_cflags, tb->trace_vcpu_dstate);
if (!qht_remove(&tb_ctx.htable, tb, h)) { if (!qht_remove(&tb_ctx.htable, tb, h)) {
return; return;
} }
@ -1825,21 +1835,14 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
if (rm_from_page_list) { if (rm_from_page_list) {
p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
tb_page_remove(p, tb); tb_page_remove(p, tb);
invalidate_page_bitmap(p);
if (tb->page_addr[1] != -1) { if (tb->page_addr[1] != -1) {
p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
tb_page_remove(p, tb); tb_page_remove(p, tb);
invalidate_page_bitmap(p);
} }
} }
/* remove the TB from the hash list */ /* remove the TB from the hash list */
h = tb_jmp_cache_hash_func(tb->pc); tb_jmp_cache_inval_tb(tb);
CPU_FOREACH(cpu) {
if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
qatomic_set(&cpu->tb_jmp_cache[h], NULL);
}
}
/* suppress this TB from the two jump lists */ /* suppress this TB from the two jump lists */
tb_remove_from_jmp_list(tb, 0); tb_remove_from_jmp_list(tb, 0);
@ -1874,35 +1877,6 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
} }
} }
#ifdef CONFIG_SOFTMMU
/* call with @p->lock held */
static void build_page_bitmap(PageDesc *p)
{
int n, tb_start, tb_end;
TranslationBlock *tb;
assert_page_locked(p);
p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
PAGE_FOR_EACH_TB(p, tb, n) {
/* NOTE: this is subtle as a TB may span two physical pages */
if (n == 0) {
/* NOTE: tb_end may be after the end of the page, but
it is not a problem */
tb_start = tb->pc & ~TARGET_PAGE_MASK;
tb_end = tb_start + tb->size;
if (tb_end > TARGET_PAGE_SIZE) {
tb_end = TARGET_PAGE_SIZE;
}
} else {
tb_start = 0;
tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
}
bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
}
}
#endif
/* add the tb in the target page and protect it if necessary /* add the tb in the target page and protect it if necessary
* *
* Called with mmap_lock held for user-mode emulation. * Called with mmap_lock held for user-mode emulation.
@ -1923,7 +1897,6 @@ static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
page_already_protected = p->first_tb != (uintptr_t)NULL; page_already_protected = p->first_tb != (uintptr_t)NULL;
#endif #endif
p->first_tb = (uintptr_t)tb | n; p->first_tb = (uintptr_t)tb | n;
invalidate_page_bitmap(p);
#if defined(CONFIG_USER_ONLY) #if defined(CONFIG_USER_ONLY)
/* translator_loop() must have made all TB pages non-writable */ /* translator_loop() must have made all TB pages non-writable */
@ -1969,8 +1942,8 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
* Note that inserting into the hash table first isn't an option, since * Note that inserting into the hash table first isn't an option, since
* we can only insert TBs that are fully initialized. * we can only insert TBs that are fully initialized.
*/ */
page_lock_pair(&p, phys_pc, &p2, phys_page2, 1); page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK); tb_page_add(p, tb, 0, phys_pc);
if (p2) { if (p2) {
tb_page_add(p2, tb, 1, phys_page2); tb_page_add(p2, tb, 1, phys_page2);
} else { } else {
@ -1978,17 +1951,15 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
} }
/* add in the hash table */ /* add in the hash table */
h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags, h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
tb->trace_vcpu_dstate); tb->flags, tb->cflags, tb->trace_vcpu_dstate);
qht_insert(&tb_ctx.htable, tb, h, &existing_tb); qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
/* remove TB from the page(s) if we couldn't insert it */ /* remove TB from the page(s) if we couldn't insert it */
if (unlikely(existing_tb)) { if (unlikely(existing_tb)) {
tb_page_remove(p, tb); tb_page_remove(p, tb);
invalidate_page_bitmap(p);
if (p2) { if (p2) {
tb_page_remove(p2, tb); tb_page_remove(p2, tb);
invalidate_page_bitmap(p2);
} }
tb = existing_tb; tb = existing_tb;
} }
@ -2202,19 +2173,19 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
{ {
CPUArchState *env = cpu->env_ptr; CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb, *existing_tb; TranslationBlock *tb, *existing_tb;
tb_page_addr_t phys_pc, phys_page2; tb_page_addr_t phys_pc;
target_ulong virt_page2;
tcg_insn_unit *gen_code_buf; tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size, max_insns; int gen_code_size, search_size, max_insns;
#ifdef CONFIG_PROFILER #ifdef CONFIG_PROFILER
TCGProfile *prof = &tcg_ctx->prof; TCGProfile *prof = &tcg_ctx->prof;
int64_t ti; int64_t ti;
#endif #endif
void *host_pc;
assert_memory_lock(); assert_memory_lock();
qemu_thread_jit_write(); qemu_thread_jit_write();
phys_pc = get_page_addr_code(env, pc); phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);
if (phys_pc == -1) { if (phys_pc == -1) {
/* Generate a one-shot TB with 1 insn in it */ /* Generate a one-shot TB with 1 insn in it */
@ -2240,11 +2211,15 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
gen_code_buf = tcg_ctx->code_gen_ptr; gen_code_buf = tcg_ctx->code_gen_ptr;
tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf); tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
#if !TARGET_TB_PCREL
tb->pc = pc; tb->pc = pc;
#endif
tb->cs_base = cs_base; tb->cs_base = cs_base;
tb->flags = flags; tb->flags = flags;
tb->cflags = cflags; tb->cflags = cflags;
tb->trace_vcpu_dstate = *cpu->trace_dstate; tb->trace_vcpu_dstate = *cpu->trace_dstate;
tb->page_addr[0] = phys_pc;
tb->page_addr[1] = -1;
tcg_ctx->tb_cflags = cflags; tcg_ctx->tb_cflags = cflags;
tb_overflow: tb_overflow:
@ -2283,12 +2258,12 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
//// --- End LibAFL code --- //// --- End LibAFL code ---
gen_intermediate_code(cpu, tb, max_insns); gen_intermediate_code(cpu, tb, max_insns, pc, host_pc);
assert(tb->size != 0); assert(tb->size != 0);
tcg_ctx->cpu = NULL; tcg_ctx->cpu = NULL;
max_insns = tb->icount; max_insns = tb->icount;
trace_translate_block(tb, tb->pc, tb->tc.ptr); trace_translate_block(tb, pc, tb->tc.ptr);
/* generate machine code */ /* generate machine code */
tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID; tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
@ -2309,7 +2284,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
ti = profile_getclock(); ti = profile_getclock();
#endif #endif
gen_code_size = tcg_gen_code(tcg_ctx, tb); gen_code_size = tcg_gen_code(tcg_ctx, tb, pc);
if (unlikely(gen_code_size < 0)) { if (unlikely(gen_code_size < 0)) {
error_return: error_return:
switch (gen_code_size) { switch (gen_code_size) {
@ -2365,7 +2340,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
#ifdef DEBUG_DISAS #ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) && if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
qemu_log_in_addr_range(tb->pc)) { qemu_log_in_addr_range(pc)) {
FILE *logfile = qemu_log_trylock(); FILE *logfile = qemu_log_trylock();
if (logfile) { if (logfile) {
int code_size, data_size; int code_size, data_size;
@ -2459,13 +2434,11 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
} }
/* /*
* If the TB is not associated with a physical RAM page then * If the TB is not associated with a physical RAM page then it must be
* it must be a temporary one-insn TB, and we have nothing to do * a temporary one-insn TB, and we have nothing left to do. Return early
* except fill in the page_addr[] fields. Return early before * before attempting to link to other TBs or add to the lookup table.
* attempting to link to other TBs or add to the lookup table.
*/ */
if (phys_pc == -1) { if (tb->page_addr[0] == -1) {
tb->page_addr[0] = tb->page_addr[1] = -1;
return tb; return tb;
} }
@ -2476,17 +2449,11 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
*/ */
tcg_tb_insert(tb); tcg_tb_insert(tb);
/* check next page if needed */
virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
phys_page2 = -1;
if ((pc & TARGET_PAGE_MASK) != virt_page2) {
phys_page2 = get_page_addr_code(env, virt_page2);
}
/* /*
* No explicit memory barrier is required -- tb_link_page() makes the * No explicit memory barrier is required -- tb_link_page() makes the
* TB visible in a consistent state. * TB visible in a consistent state.
*/ */
existing_tb = tb_link_page(tb, phys_pc, phys_page2); existing_tb = tb_link_page(tb, tb->page_addr[0], tb->page_addr[1]);
/* if the TB already exists, discard what we just translated */ /* if the TB already exists, discard what we just translated */
if (unlikely(existing_tb != tb)) { if (unlikely(existing_tb != tb)) {
uintptr_t orig_aligned = (uintptr_t)gen_code_buf; uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
@ -2541,11 +2508,12 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
if (n == 0) { if (n == 0) {
/* NOTE: tb_end may be after the end of the page, but /* NOTE: tb_end may be after the end of the page, but
it is not a problem */ it is not a problem */
tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); tb_start = tb->page_addr[0];
tb_end = tb_start + tb->size; tb_end = tb_start + tb->size;
} else { } else {
tb_start = tb->page_addr[1]; tb_start = tb->page_addr[1];
tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); tb_end = tb_start + ((tb->page_addr[0] + tb->size)
& ~TARGET_PAGE_MASK);
} }
if (!(tb_end <= start || tb_start >= end)) { if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC #ifdef TARGET_HAS_PRECISE_SMC
@ -2575,7 +2543,6 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
/* if no code remaining, no need to continue to use slow writes */ /* if no code remaining, no need to continue to use slow writes */
if (!p->first_tb) { if (!p->first_tb) {
invalidate_page_bitmap(p);
tlb_unprotect_code(start); tlb_unprotect_code(start);
} }
#endif #endif
@ -2671,24 +2638,8 @@ void tb_invalidate_phys_page_fast(struct page_collection *pages,
} }
assert_page_locked(p); assert_page_locked(p);
if (!p->code_bitmap && tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) { retaddr);
build_page_bitmap(p);
}
if (p->code_bitmap) {
unsigned int nr;
unsigned long b;
nr = start & ~TARGET_PAGE_MASK;
b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
if (b & ((1 << len) - 1)) {
goto do_invalidate;
}
} else {
do_invalidate:
tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
retaddr);
}
} }
#else #else
/* Called with mmap_lock held. If pc is not 0 then it indicates the /* Called with mmap_lock held. If pc is not 0 then it indicates the
@ -2829,9 +2780,13 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
*/ */
cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n; cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc, if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
"cpu_io_recompile: rewound execution of TB to " target_ulong pc = log_pc(cpu, tb);
TARGET_FMT_lx "\n", tb->pc); if (qemu_log_in_addr_range(pc)) {
qemu_log("cpu_io_recompile: rewound execution of TB to "
TARGET_FMT_lx "\n", pc);
}
}
cpu_loop_exit_noexc(cpu); cpu_loop_exit_noexc(cpu);
} }
@ -3102,7 +3057,7 @@ int page_get_flags(target_ulong address)
#ifndef PAGE_TARGET_STICKY #ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY 0 #define PAGE_TARGET_STICKY 0
#endif #endif
#define PAGE_STICKY (PAGE_ANON | PAGE_TARGET_STICKY) #define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
/* Modify the flags of a page and invalidate the code if necessary. /* Modify the flags of a page and invalidate the code if necessary.
The flag PAGE_WRITE_ORG is positioned automatically depending The flag PAGE_WRITE_ORG is positioned automatically depending
@ -3133,7 +3088,7 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
for (addr = start, len = end - start; for (addr = start, len = end - start;
len != 0; len != 0;
len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, true);
/* If the write protection bit is set, then we invalidate /* If the write protection bit is set, then we invalidate
the code inside. */ the code inside. */
@ -3153,6 +3108,32 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
} }
} }
void page_reset_target_data(target_ulong start, target_ulong end)
{
target_ulong addr, len;
/*
* This function should never be called with addresses outside the
* guest address space. If this assert fires, it probably indicates
* a missing call to h2g_valid.
*/
assert(end - 1 <= GUEST_ADDR_MAX);
assert(start < end);
assert_memory_lock();
start = start & TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);
for (addr = start, len = end - start;
len != 0;
len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
g_free(p->target_data);
p->target_data = NULL;
}
}
void *page_get_target_data(target_ulong address) void *page_get_target_data(target_ulong address)
{ {
PageDesc *p = page_find(address >> TARGET_PAGE_BITS); PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
@ -3330,6 +3311,26 @@ int page_unprotect(target_ulong address, uintptr_t pc)
} }
#endif /* CONFIG_USER_ONLY */ #endif /* CONFIG_USER_ONLY */
/*
* Called by generic code at e.g. cpu reset after cpu creation,
* therefore we must be prepared to allocate the jump cache.
*/
void tcg_flush_jmp_cache(CPUState *cpu)
{
CPUJumpCache *jc = cpu->tb_jmp_cache;
if (likely(jc)) {
for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
qatomic_set(&jc->array[i].tb, NULL);
}
} else {
/* This should happen once during realize, and thus never race. */
jc = g_new0(CPUJumpCache, 1);
jc = qatomic_xchg(&cpu->tb_jmp_cache, jc);
assert(jc == NULL);
}
}
/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */ /* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs) void tcg_flush_softmmu_tlb(CPUState *cs)
{ {

View File

@ -78,30 +78,27 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0; return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
} }
static inline void translator_page_protect(DisasContextBase *dcbase, void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
target_ulong pc) target_ulong pc, void *host_pc,
{ const TranslatorOps *ops, DisasContextBase *db)
#ifdef CONFIG_USER_ONLY
dcbase->page_protect_end = pc | ~TARGET_PAGE_MASK;
page_protect(pc);
#endif
}
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
CPUState *cpu, TranslationBlock *tb, int max_insns)
{ {
uint32_t cflags = tb_cflags(tb); uint32_t cflags = tb_cflags(tb);
bool plugin_enabled; bool plugin_enabled;
/* Initialize DisasContext */ /* Initialize DisasContext */
db->tb = tb; db->tb = tb;
db->pc_first = tb->pc; db->pc_first = pc;
db->pc_next = db->pc_first; db->pc_next = pc;
db->is_jmp = DISAS_NEXT; db->is_jmp = DISAS_NEXT;
db->num_insns = 0; db->num_insns = 0;
db->max_insns = max_insns; db->max_insns = max_insns;
db->singlestep_enabled = cflags & CF_SINGLE_STEP; db->singlestep_enabled = cflags & CF_SINGLE_STEP;
translator_page_protect(db, db->pc_next); db->host_addr[0] = host_pc;
db->host_addr[1] = NULL;
#ifdef CONFIG_USER_ONLY
page_protect(pc);
#endif
ops->init_disas_context(db, cpu); ops->init_disas_context(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
@ -114,7 +111,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
ops->tb_start(db, cpu); ops->tb_start(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
plugin_enabled = plugin_gen_tb_start(cpu, tb, cflags & CF_MEMI_ONLY); plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY);
while (true) { while (true) {
db->num_insns++; db->num_insns++;
@ -255,31 +252,103 @@ post_translate_insn:
#endif #endif
} }
static inline void translator_maybe_page_protect(DisasContextBase *dcbase, static void *translator_access(CPUArchState *env, DisasContextBase *db,
target_ulong pc, size_t len) target_ulong pc, size_t len)
{ {
#ifdef CONFIG_USER_ONLY void *host;
target_ulong end = pc + len - 1; target_ulong base, end;
TranslationBlock *tb;
if (end > dcbase->page_protect_end) { tb = db->tb;
translator_page_protect(dcbase, end);
/* Use slow path if first page is MMIO. */
if (unlikely(tb->page_addr[0] == -1)) {
return NULL;
} }
end = pc + len - 1;
if (likely(is_same_page(db, end))) {
host = db->host_addr[0];
base = db->pc_first;
} else {
host = db->host_addr[1];
base = TARGET_PAGE_ALIGN(db->pc_first);
if (host == NULL) {
tb->page_addr[1] =
get_page_addr_code_hostp(env, base, &db->host_addr[1]);
#ifdef CONFIG_USER_ONLY
page_protect(end);
#endif #endif
/* We cannot handle MMIO as second page. */
assert(tb->page_addr[1] != -1);
host = db->host_addr[1];
}
/* Use slow path when crossing pages. */
if (is_same_page(db, pc)) {
return NULL;
}
}
tcg_debug_assert(pc >= base);
return host + (pc - base);
} }
#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \ uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \ {
abi_ptr pc, bool do_swap) \ uint8_t ret;
{ \ void *p = translator_access(env, db, pc, sizeof(ret));
translator_maybe_page_protect(dcbase, pc, sizeof(type)); \
type ret = load_fn(env, pc); \ if (p) {
if (do_swap) { \ plugin_insn_append(pc, p, sizeof(ret));
ret = swap_fn(ret); \ return ldub_p(p);
} \
plugin_insn_append(pc, &ret, sizeof(ret)); \
return ret; \
} }
ret = cpu_ldub_code(env, pc);
plugin_insn_append(pc, &ret, sizeof(ret));
return ret;
}
FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD) uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
{
uint16_t ret, plug;
void *p = translator_access(env, db, pc, sizeof(ret));
#undef GEN_TRANSLATOR_LD if (p) {
plugin_insn_append(pc, p, sizeof(ret));
return lduw_p(p);
}
ret = cpu_lduw_code(env, pc);
plug = tswap16(ret);
plugin_insn_append(pc, &plug, sizeof(ret));
return ret;
}
uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
{
uint32_t ret, plug;
void *p = translator_access(env, db, pc, sizeof(ret));
if (p) {
plugin_insn_append(pc, p, sizeof(ret));
return ldl_p(p);
}
ret = cpu_ldl_code(env, pc);
plug = tswap32(ret);
plugin_insn_append(pc, &plug, sizeof(ret));
return ret;
}
uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
{
uint64_t ret, plug;
void *p = translator_access(env, db, pc, sizeof(ret));
if (p) {
plugin_insn_append(pc, p, sizeof(ret));
return ldq_p(p);
}
ret = cpu_ldq_code(env, pc);
plug = tswap64(ret);
plugin_insn_append(pc, &plug, sizeof(ret));
return ret;
}

View File

@ -80,10 +80,7 @@ MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
* (and if the translator doesn't handle page boundaries correctly * (and if the translator doesn't handle page boundaries correctly
* there's little we can do about that here). Therefore, do not * there's little we can do about that here). Therefore, do not
* trigger the unwinder. * trigger the unwinder.
*
* Like tb_gen_code, release the memory lock before cpu_loop_exit.
*/ */
mmap_unlock();
*pc = 0; *pc = 0;
return MMU_INST_FETCH; return MMU_INST_FETCH;
} }
@ -199,6 +196,20 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
return size ? g2h(env_cpu(env), addr) : NULL; return size ? g2h(env_cpu(env), addr) : NULL;
} }
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
void **hostp)
{
int flags;
flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
g_assert(flags == 0);
if (hostp) {
*hostp = g2h_untagged(addr);
}
return addr;
}
/* The softmmu versions of these helpers are in cputlb.c. */ /* The softmmu versions of these helpers are in cputlb.c. */
/* /*

View File

@ -32,6 +32,7 @@
#include "qapi/qapi-visit-audio.h" #include "qapi/qapi-visit-audio.h"
#include "qemu/cutils.h" #include "qemu/cutils.h"
#include "qemu/module.h" #include "qemu/module.h"
#include "qemu/help_option.h"
#include "sysemu/sysemu.h" #include "sysemu/sysemu.h"
#include "sysemu/replay.h" #include "sysemu/replay.h"
#include "sysemu/runstate.h" #include "sysemu/runstate.h"
@ -137,7 +138,7 @@ static inline int audio_bits_to_index (int bits)
default: default:
audio_bug ("bits_to_index", 1); audio_bug ("bits_to_index", 1);
AUD_log (NULL, "invalid bits %d\n", bits); AUD_log (NULL, "invalid bits %d\n", bits);
abort(); return 0;
} }
} }
@ -155,7 +156,7 @@ void *audio_calloc (const char *funcname, int nmemb, size_t size)
AUD_log (NULL, "%s passed invalid arguments to audio_calloc\n", AUD_log (NULL, "%s passed invalid arguments to audio_calloc\n",
funcname); funcname);
AUD_log (NULL, "nmemb=%d size=%zu (len=%zu)\n", nmemb, size, len); AUD_log (NULL, "nmemb=%d size=%zu (len=%zu)\n", nmemb, size, len);
abort(); return NULL;
} }
return g_malloc0 (len); return g_malloc0 (len);
@ -542,7 +543,7 @@ static size_t audio_pcm_hw_get_live_in(HWVoiceIn *hw)
size_t live = hw->total_samples_captured - audio_pcm_hw_find_min_in (hw); size_t live = hw->total_samples_captured - audio_pcm_hw_find_min_in (hw);
if (audio_bug(__func__, live > hw->conv_buf->size)) { if (audio_bug(__func__, live > hw->conv_buf->size)) {
dolog("live=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size); dolog("live=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size);
abort(); return 0;
} }
return live; return live;
} }
@ -580,7 +581,7 @@ static size_t audio_pcm_sw_read(SWVoiceIn *sw, void *buf, size_t size)
} }
if (audio_bug(__func__, live > hw->conv_buf->size)) { if (audio_bug(__func__, live > hw->conv_buf->size)) {
dolog("live_in=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size); dolog("live_in=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size);
abort(); return 0;
} }
rpos = audio_ring_posb(hw->conv_buf->pos, live, hw->conv_buf->size); rpos = audio_ring_posb(hw->conv_buf->pos, live, hw->conv_buf->size);
@ -655,7 +656,7 @@ static size_t audio_pcm_hw_get_live_out (HWVoiceOut *hw, int *nb_live)
if (audio_bug(__func__, live > hw->mix_buf->size)) { if (audio_bug(__func__, live > hw->mix_buf->size)) {
dolog("live=%zu hw->mix_buf->size=%zu\n", live, hw->mix_buf->size); dolog("live=%zu hw->mix_buf->size=%zu\n", live, hw->mix_buf->size);
abort(); return 0;
} }
return live; return live;
} }
@ -705,7 +706,7 @@ static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t size)
live = sw->total_hw_samples_mixed; live = sw->total_hw_samples_mixed;
if (audio_bug(__func__, live > hwsamples)) { if (audio_bug(__func__, live > hwsamples)) {
dolog("live=%zu hw->mix_buf->size=%zu\n", live, hwsamples); dolog("live=%zu hw->mix_buf->size=%zu\n", live, hwsamples);
abort(); return 0;
} }
if (live == hwsamples) { if (live == hwsamples) {
@ -997,7 +998,7 @@ static size_t audio_get_avail (SWVoiceIn *sw)
if (audio_bug(__func__, live > sw->hw->conv_buf->size)) { if (audio_bug(__func__, live > sw->hw->conv_buf->size)) {
dolog("live=%zu sw->hw->conv_buf->size=%zu\n", live, dolog("live=%zu sw->hw->conv_buf->size=%zu\n", live,
sw->hw->conv_buf->size); sw->hw->conv_buf->size);
abort(); return 0;
} }
ldebug ( ldebug (
@ -1027,7 +1028,7 @@ static size_t audio_get_free(SWVoiceOut *sw)
if (audio_bug(__func__, live > sw->hw->mix_buf->size)) { if (audio_bug(__func__, live > sw->hw->mix_buf->size)) {
dolog("live=%zu sw->hw->mix_buf->size=%zu\n", live, dolog("live=%zu sw->hw->mix_buf->size=%zu\n", live,
sw->hw->mix_buf->size); sw->hw->mix_buf->size);
abort(); return 0;
} }
dead = sw->hw->mix_buf->size - live; dead = sw->hw->mix_buf->size - live;
@ -1169,7 +1170,7 @@ static void audio_run_out (AudioState *s)
if (audio_bug(__func__, live > hw->mix_buf->size)) { if (audio_bug(__func__, live > hw->mix_buf->size)) {
dolog("live=%zu hw->mix_buf->size=%zu\n", live, hw->mix_buf->size); dolog("live=%zu hw->mix_buf->size=%zu\n", live, hw->mix_buf->size);
abort(); continue;
} }
if (hw->pending_disable && !nb_live) { if (hw->pending_disable && !nb_live) {
@ -1202,7 +1203,7 @@ static void audio_run_out (AudioState *s)
if (audio_bug(__func__, hw->mix_buf->pos >= hw->mix_buf->size)) { if (audio_bug(__func__, hw->mix_buf->pos >= hw->mix_buf->size)) {
dolog("hw->mix_buf->pos=%zu hw->mix_buf->size=%zu played=%zu\n", dolog("hw->mix_buf->pos=%zu hw->mix_buf->size=%zu played=%zu\n",
hw->mix_buf->pos, hw->mix_buf->size, played); hw->mix_buf->pos, hw->mix_buf->size, played);
abort(); hw->mix_buf->pos = 0;
} }
#ifdef DEBUG_OUT #ifdef DEBUG_OUT
@ -1222,7 +1223,7 @@ static void audio_run_out (AudioState *s)
if (audio_bug(__func__, played > sw->total_hw_samples_mixed)) { if (audio_bug(__func__, played > sw->total_hw_samples_mixed)) {
dolog("played=%zu sw->total_hw_samples_mixed=%zu\n", dolog("played=%zu sw->total_hw_samples_mixed=%zu\n",
played, sw->total_hw_samples_mixed); played, sw->total_hw_samples_mixed);
abort(); played = sw->total_hw_samples_mixed;
} }
sw->total_hw_samples_mixed -= played; sw->total_hw_samples_mixed -= played;
@ -1345,7 +1346,7 @@ static void audio_run_capture (AudioState *s)
if (audio_bug(__func__, captured > sw->total_hw_samples_mixed)) { if (audio_bug(__func__, captured > sw->total_hw_samples_mixed)) {
dolog("captured=%zu sw->total_hw_samples_mixed=%zu\n", dolog("captured=%zu sw->total_hw_samples_mixed=%zu\n",
captured, sw->total_hw_samples_mixed); captured, sw->total_hw_samples_mixed);
abort(); captured = sw->total_hw_samples_mixed;
} }
sw->total_hw_samples_mixed -= captured; sw->total_hw_samples_mixed -= captured;
@ -1743,7 +1744,6 @@ static AudioState *audio_init(Audiodev *dev, const char *name)
atexit(audio_cleanup); atexit(audio_cleanup);
atexit_registered = true; atexit_registered = true;
} }
QTAILQ_INSERT_TAIL(&audio_states, s, list);
s->ts = timer_new_ns(QEMU_CLOCK_VIRTUAL, audio_timer, s); s->ts = timer_new_ns(QEMU_CLOCK_VIRTUAL, audio_timer, s);
@ -1769,6 +1769,10 @@ static AudioState *audio_init(Audiodev *dev, const char *name)
} else { } else {
dolog ("Unknown audio driver `%s'\n", drvname); dolog ("Unknown audio driver `%s'\n", drvname);
} }
if (!done) {
free_audio_state(s);
return NULL;
}
} else { } else {
for (i = 0; audio_prio_list[i]; i++) { for (i = 0; audio_prio_list[i]; i++) {
AudiodevListEntry *e = audiodev_find(&head, audio_prio_list[i]); AudiodevListEntry *e = audiodev_find(&head, audio_prio_list[i]);
@ -1806,6 +1810,7 @@ static AudioState *audio_init(Audiodev *dev, const char *name)
"(Audio can continue looping even after stopping the VM)\n"); "(Audio can continue looping even after stopping the VM)\n");
} }
QTAILQ_INSERT_TAIL(&audio_states, s, list);
QLIST_INIT (&s->card_head); QLIST_INIT (&s->card_head);
vmstate_register (NULL, 0, &vmstate_audio, s); vmstate_register (NULL, 0, &vmstate_audio, s);
return s; return s;
@ -2025,6 +2030,7 @@ void audio_create_pdos(Audiodev *dev)
CASE(OSS, oss, Oss); CASE(OSS, oss, Oss);
CASE(PA, pa, Pa); CASE(PA, pa, Pa);
CASE(SDL, sdl, Sdl); CASE(SDL, sdl, Sdl);
CASE(SNDIO, sndio, );
CASE(SPICE, spice, ); CASE(SPICE, spice, );
CASE(WAV, wav, ); CASE(WAV, wav, );
@ -2097,10 +2103,28 @@ static void audio_validate_opts(Audiodev *dev, Error **errp)
} }
} }
void audio_help(void)
{
int i;
printf("Available audio drivers:\n");
for (i = 0; i < AUDIODEV_DRIVER__MAX; i++) {
audio_driver *driver = audio_driver_lookup(AudiodevDriver_str(i));
if (driver) {
printf("%s\n", driver->name);
}
}
}
void audio_parse_option(const char *opt) void audio_parse_option(const char *opt)
{ {
Audiodev *dev = NULL; Audiodev *dev = NULL;
if (is_help_option(opt)) {
audio_help();
exit(EXIT_SUCCESS);
}
Visitor *v = qobject_input_visitor_new_str(opt, "driver", &error_fatal); Visitor *v = qobject_input_visitor_new_str(opt, "driver", &error_fatal);
visit_type_Audiodev(v, NULL, &dev, &error_fatal); visit_type_Audiodev(v, NULL, &dev, &error_fatal);
visit_free(v); visit_free(v);
@ -2119,13 +2143,17 @@ void audio_define(Audiodev *dev)
QSIMPLEQ_INSERT_TAIL(&audiodevs, e, next); QSIMPLEQ_INSERT_TAIL(&audiodevs, e, next);
} }
void audio_init_audiodevs(void) bool audio_init_audiodevs(void)
{ {
AudiodevListEntry *e; AudiodevListEntry *e;
QSIMPLEQ_FOREACH(e, &audiodevs, next) { QSIMPLEQ_FOREACH(e, &audiodevs, next) {
audio_init(e->dev, NULL); if (!audio_init(e->dev, NULL)) {
return false;
}
} }
return true;
} }
audsettings audiodev_to_audsettings(AudiodevPerDirectionOptions *pdo) audsettings audiodev_to_audsettings(AudiodevPerDirectionOptions *pdo)

View File

@ -170,7 +170,8 @@ void audio_sample_from_uint64(void *samples, int pos,
void audio_define(Audiodev *audio); void audio_define(Audiodev *audio);
void audio_parse_option(const char *opt); void audio_parse_option(const char *opt);
void audio_init_audiodevs(void); bool audio_init_audiodevs(void);
void audio_help(void);
void audio_legacy_help(void); void audio_legacy_help(void);
AudioState *audio_state_by_name(const char *name); AudioState *audio_state_by_name(const char *name);

View File

@ -59,13 +59,12 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s,
if (audio_bug(__func__, !voice_size && max_voices)) { if (audio_bug(__func__, !voice_size && max_voices)) {
dolog ("drv=`%s' voice_size=0 max_voices=%d\n", dolog ("drv=`%s' voice_size=0 max_voices=%d\n",
drv->name, max_voices); drv->name, max_voices);
abort(); glue (s->nb_hw_voices_, TYPE) = 0;
} }
if (audio_bug(__func__, voice_size && !max_voices)) { if (audio_bug(__func__, voice_size && !max_voices)) {
dolog ("drv=`%s' voice_size=%d max_voices=0\n", dolog ("drv=`%s' voice_size=%d max_voices=0\n",
drv->name, voice_size); drv->name, voice_size);
abort();
} }
} }
@ -82,7 +81,6 @@ static void glue(audio_pcm_hw_alloc_resources_, TYPE)(HW *hw)
size_t samples = hw->samples; size_t samples = hw->samples;
if (audio_bug(__func__, samples == 0)) { if (audio_bug(__func__, samples == 0)) {
dolog("Attempted to allocate empty buffer\n"); dolog("Attempted to allocate empty buffer\n");
abort();
} }
HWBUF = g_malloc0(sizeof(STSampleBuffer) + sizeof(st_sample) * samples); HWBUF = g_malloc0(sizeof(STSampleBuffer) + sizeof(st_sample) * samples);
@ -254,12 +252,12 @@ static HW *glue(audio_pcm_hw_add_new_, TYPE)(AudioState *s,
if (audio_bug(__func__, !drv)) { if (audio_bug(__func__, !drv)) {
dolog ("No host audio driver\n"); dolog ("No host audio driver\n");
abort(); return NULL;
} }
if (audio_bug(__func__, !drv->pcm_ops)) { if (audio_bug(__func__, !drv->pcm_ops)) {
dolog ("Host audio driver without pcm_ops\n"); dolog ("Host audio driver without pcm_ops\n");
abort(); return NULL;
} }
hw = audio_calloc(__func__, 1, glue(drv->voice_size_, TYPE)); hw = audio_calloc(__func__, 1, glue(drv->voice_size_, TYPE));
@ -277,13 +275,12 @@ static HW *glue(audio_pcm_hw_add_new_, TYPE)(AudioState *s,
QLIST_INIT (&hw->cap_head); QLIST_INIT (&hw->cap_head);
#endif #endif
if (glue (hw->pcm_ops->init_, TYPE) (hw, as, s->drv_opaque)) { if (glue (hw->pcm_ops->init_, TYPE) (hw, as, s->drv_opaque)) {
g_free(hw); goto err0;
return NULL;
} }
if (audio_bug(__func__, hw->samples <= 0)) { if (audio_bug(__func__, hw->samples <= 0)) {
dolog("hw->samples=%zd\n", hw->samples); dolog("hw->samples=%zd\n", hw->samples);
abort(); goto err1;
} }
if (hw->info.is_float) { if (hw->info.is_float) {
@ -312,6 +309,12 @@ static HW *glue(audio_pcm_hw_add_new_, TYPE)(AudioState *s,
audio_attach_capture (hw); audio_attach_capture (hw);
#endif #endif
return hw; return hw;
err1:
glue (hw->pcm_ops->fini_, TYPE) (hw);
err0:
g_free (hw);
return NULL;
} }
AudiodevPerDirectionOptions *glue(audio_get_pdo_, TYPE)(Audiodev *dev) AudiodevPerDirectionOptions *glue(audio_get_pdo_, TYPE)(Audiodev *dev)
@ -336,6 +339,8 @@ AudiodevPerDirectionOptions *glue(audio_get_pdo_, TYPE)(Audiodev *dev)
return qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.TYPE); return qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.TYPE);
case AUDIODEV_DRIVER_SDL: case AUDIODEV_DRIVER_SDL:
return qapi_AudiodevSdlPerDirectionOptions_base(dev->u.sdl.TYPE); return qapi_AudiodevSdlPerDirectionOptions_base(dev->u.sdl.TYPE);
case AUDIODEV_DRIVER_SNDIO:
return dev->u.sndio.TYPE;
case AUDIODEV_DRIVER_SPICE: case AUDIODEV_DRIVER_SPICE:
return dev->u.spice.TYPE; return dev->u.spice.TYPE;
case AUDIODEV_DRIVER_WAV: case AUDIODEV_DRIVER_WAV:
@ -432,7 +437,7 @@ void glue (AUD_close_, TYPE) (QEMUSoundCard *card, SW *sw)
if (sw) { if (sw) {
if (audio_bug(__func__, !card)) { if (audio_bug(__func__, !card)) {
dolog ("card=%p\n", card); dolog ("card=%p\n", card);
abort(); return;
} }
glue (audio_close_, TYPE) (sw); glue (audio_close_, TYPE) (sw);
@ -454,7 +459,7 @@ SW *glue (AUD_open_, TYPE) (
if (audio_bug(__func__, !card || !name || !callback_fn || !as)) { if (audio_bug(__func__, !card || !name || !callback_fn || !as)) {
dolog ("card=%p name=%p callback_fn=%p as=%p\n", dolog ("card=%p name=%p callback_fn=%p as=%p\n",
card, name, callback_fn, as); card, name, callback_fn, as);
abort(); goto fail;
} }
s = card->state; s = card->state;
@ -465,12 +470,12 @@ SW *glue (AUD_open_, TYPE) (
if (audio_bug(__func__, audio_validate_settings(as))) { if (audio_bug(__func__, audio_validate_settings(as))) {
audio_print_settings (as); audio_print_settings (as);
abort(); goto fail;
} }
if (audio_bug(__func__, !s->drv)) { if (audio_bug(__func__, !s->drv)) {
dolog ("Can not open `%s' (no host audio driver)\n", name); dolog ("Can not open `%s' (no host audio driver)\n", name);
abort(); goto fail;
} }
if (sw && audio_pcm_info_eq (&sw->info, as)) { if (sw && audio_pcm_info_eq (&sw->info, as)) {

View File

@ -17,6 +17,7 @@ foreach m : [
['pa', pulse, files('paaudio.c')], ['pa', pulse, files('paaudio.c')],
['sdl', sdl, files('sdlaudio.c')], ['sdl', sdl, files('sdlaudio.c')],
['jack', jack, files('jackaudio.c')], ['jack', jack, files('jackaudio.c')],
['sndio', sndio, files('sndioaudio.c')],
['spice', spice, files('spiceaudio.c')] ['spice', spice, files('spiceaudio.c')]
] ]
if m[1].found() if m[1].found()

565
audio/sndioaudio.c Normal file
View File

@ -0,0 +1,565 @@
/*
* SPDX-License-Identifier: ISC
*
* Copyright (c) 2019 Alexandre Ratchov <alex@caoua.org>
*/
/*
* TODO :
*
* Use a single device and open it in full-duplex rather than
* opening it twice (once for playback once for recording).
*
* This is the only way to ensure that playback doesn't drift with respect
* to recording, which is what guest systems expect.
*/
#include <poll.h>
#include <sndio.h>
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "audio.h"
#include "trace.h"
#define AUDIO_CAP "sndio"
#include "audio_int.h"
/* default latency in microseconds if no option is set */
#define SNDIO_LATENCY_US 50000
typedef struct SndioVoice {
union {
HWVoiceOut out;
HWVoiceIn in;
} hw;
struct sio_par par;
struct sio_hdl *hdl;
struct pollfd *pfds;
struct pollindex {
struct SndioVoice *self;
int index;
} *pindexes;
unsigned char *buf;
size_t buf_size;
size_t sndio_pos;
size_t qemu_pos;
unsigned int mode;
unsigned int nfds;
bool enabled;
} SndioVoice;
typedef struct SndioConf {
const char *devname;
unsigned int latency;
} SndioConf;
/* needed for forward reference */
static void sndio_poll_in(void *arg);
static void sndio_poll_out(void *arg);
/*
* stop polling descriptors
*/
static void sndio_poll_clear(SndioVoice *self)
{
struct pollfd *pfd;
int i;
for (i = 0; i < self->nfds; i++) {
pfd = &self->pfds[i];
qemu_set_fd_handler(pfd->fd, NULL, NULL, NULL);
}
self->nfds = 0;
}
/*
* write data to the device until it blocks or
* all of our buffered data is written
*/
static void sndio_write(SndioVoice *self)
{
size_t todo, n;
todo = self->qemu_pos - self->sndio_pos;
/*
* transfer data to device, until it blocks
*/
while (todo > 0) {
n = sio_write(self->hdl, self->buf + self->sndio_pos, todo);
if (n == 0) {
break;
}
self->sndio_pos += n;
todo -= n;
}
if (self->sndio_pos == self->buf_size) {
/*
* we complete the block
*/
self->sndio_pos = 0;
self->qemu_pos = 0;
}
}
/*
* read data from the device until it blocks or
* there no room any longer
*/
static void sndio_read(SndioVoice *self)
{
size_t todo, n;
todo = self->buf_size - self->sndio_pos;
/*
* transfer data from the device, until it blocks
*/
while (todo > 0) {
n = sio_read(self->hdl, self->buf + self->sndio_pos, todo);
if (n == 0) {
break;
}
self->sndio_pos += n;
todo -= n;
}
}
/*
* Set handlers for all descriptors libsndio needs to
* poll
*/
static void sndio_poll_wait(SndioVoice *self)
{
struct pollfd *pfd;
int events, i;
events = 0;
if (self->mode == SIO_PLAY) {
if (self->sndio_pos < self->qemu_pos) {
events |= POLLOUT;
}
} else {
if (self->sndio_pos < self->buf_size) {
events |= POLLIN;
}
}
/*
* fill the given array of descriptors with the events sndio
* wants, they are different from our 'event' variable because
* sndio may use descriptors internally.
*/
self->nfds = sio_pollfd(self->hdl, self->pfds, events);
for (i = 0; i < self->nfds; i++) {
pfd = &self->pfds[i];
if (pfd->fd < 0) {
continue;
}
qemu_set_fd_handler(pfd->fd,
(pfd->events & POLLIN) ? sndio_poll_in : NULL,
(pfd->events & POLLOUT) ? sndio_poll_out : NULL,
&self->pindexes[i]);
pfd->revents = 0;
}
}
/*
* call-back called when one of the descriptors
* became readable or writable
*/
static void sndio_poll_event(SndioVoice *self, int index, int event)
{
int revents;
/*
* ensure we're not called twice this cycle
*/
sndio_poll_clear(self);
/*
* make self->pfds[] look as we're returning from poll syscal,
* this is how sio_revents expects events to be.
*/
self->pfds[index].revents = event;
/*
* tell sndio to handle events and return whether we can read or
* write without blocking.
*/
revents = sio_revents(self->hdl, self->pfds);
if (self->mode == SIO_PLAY) {
if (revents & POLLOUT) {
sndio_write(self);
}
if (self->qemu_pos < self->buf_size) {
audio_run(self->hw.out.s, "sndio_out");
}
} else {
if (revents & POLLIN) {
sndio_read(self);
}
if (self->qemu_pos < self->sndio_pos) {
audio_run(self->hw.in.s, "sndio_in");
}
}
/*
* audio_run() may have changed state
*/
if (self->enabled) {
sndio_poll_wait(self);
}
}
/*
* return the upper limit of the amount of free play buffer space
*/
static size_t sndio_buffer_get_free(HWVoiceOut *hw)
{
SndioVoice *self = (SndioVoice *) hw;
return self->buf_size - self->qemu_pos;
}
/*
* return a buffer where data to play can be stored,
* its size is stored in the location pointed by the size argument.
*/
static void *sndio_get_buffer_out(HWVoiceOut *hw, size_t *size)
{
SndioVoice *self = (SndioVoice *) hw;
*size = self->buf_size - self->qemu_pos;
return self->buf + self->qemu_pos;
}
/*
* put back to sndio back-end a buffer returned by sndio_get_buffer_out()
*/
static size_t sndio_put_buffer_out(HWVoiceOut *hw, void *buf, size_t size)
{
SndioVoice *self = (SndioVoice *) hw;
self->qemu_pos += size;
sndio_poll_wait(self);
return size;
}
/*
* return a buffer from where recorded data is available,
* its size is stored in the location pointed by the size argument.
* it may not exceed the initial value of "*size".
*/
static void *sndio_get_buffer_in(HWVoiceIn *hw, size_t *size)
{
SndioVoice *self = (SndioVoice *) hw;
size_t todo, max_todo;
/*
* unlike the get_buffer_out() method, get_buffer_in()
* must return a buffer of at most the given size, see audio.c
*/
max_todo = *size;
todo = self->sndio_pos - self->qemu_pos;
if (todo > max_todo) {
todo = max_todo;
}
*size = todo;
return self->buf + self->qemu_pos;
}
/*
* discard the given amount of recorded data
*/
static void sndio_put_buffer_in(HWVoiceIn *hw, void *buf, size_t size)
{
SndioVoice *self = (SndioVoice *) hw;
self->qemu_pos += size;
if (self->qemu_pos == self->buf_size) {
self->qemu_pos = 0;
self->sndio_pos = 0;
}
sndio_poll_wait(self);
}
/*
* call-back called when one of our descriptors becomes writable
*/
static void sndio_poll_out(void *arg)
{
struct pollindex *pindex = (struct pollindex *) arg;
sndio_poll_event(pindex->self, pindex->index, POLLOUT);
}
/*
* call-back called when one of our descriptors becomes readable
*/
static void sndio_poll_in(void *arg)
{
struct pollindex *pindex = (struct pollindex *) arg;
sndio_poll_event(pindex->self, pindex->index, POLLIN);
}
static void sndio_fini(SndioVoice *self)
{
if (self->hdl) {
sio_close(self->hdl);
self->hdl = NULL;
}
g_free(self->pfds);
g_free(self->pindexes);
g_free(self->buf);
}
static int sndio_init(SndioVoice *self,
struct audsettings *as, int mode, Audiodev *dev)
{
AudiodevSndioOptions *opts = &dev->u.sndio;
unsigned long long latency;
const char *dev_name;
struct sio_par req;
unsigned int nch;
int i, nfds;
dev_name = opts->has_dev ? opts->dev : SIO_DEVANY;
latency = opts->has_latency ? opts->latency : SNDIO_LATENCY_US;
/* open the device in non-blocking mode */
self->hdl = sio_open(dev_name, mode, 1);
if (self->hdl == NULL) {
dolog("failed to open device\n");
return -1;
}
self->mode = mode;
sio_initpar(&req);
switch (as->fmt) {
case AUDIO_FORMAT_S8:
req.bits = 8;
req.sig = 1;
break;
case AUDIO_FORMAT_U8:
req.bits = 8;
req.sig = 0;
break;
case AUDIO_FORMAT_S16:
req.bits = 16;
req.sig = 1;
break;
case AUDIO_FORMAT_U16:
req.bits = 16;
req.sig = 0;
break;
case AUDIO_FORMAT_S32:
req.bits = 32;
req.sig = 1;
break;
case AUDIO_FORMAT_U32:
req.bits = 32;
req.sig = 0;
break;
default:
dolog("unknown audio sample format\n");
return -1;
}
if (req.bits > 8) {
req.le = as->endianness ? 0 : 1;
}
req.rate = as->freq;
if (mode == SIO_PLAY) {
req.pchan = as->nchannels;
} else {
req.rchan = as->nchannels;
}
/* set on-device buffer size */
req.appbufsz = req.rate * latency / 1000000;
if (!sio_setpar(self->hdl, &req)) {
dolog("failed set audio params\n");
goto fail;
}
if (!sio_getpar(self->hdl, &self->par)) {
dolog("failed get audio params\n");
goto fail;
}
nch = (mode == SIO_PLAY) ? self->par.pchan : self->par.rchan;
/*
* With the default setup, sndio supports any combination of parameters
* so these checks are mostly to catch configuration errors.
*/
if (self->par.bits != req.bits || self->par.bps != req.bits / 8 ||
self->par.sig != req.sig || (req.bits > 8 && self->par.le != req.le) ||
self->par.rate != as->freq || nch != as->nchannels) {
dolog("unsupported audio params\n");
goto fail;
}
/*
* we use one block as buffer size; this is how
* transfers get well aligned
*/
self->buf_size = self->par.round * self->par.bps * nch;
self->buf = g_malloc(self->buf_size);
if (self->buf == NULL) {
dolog("failed to allocate audio buffer\n");
goto fail;
}
nfds = sio_nfds(self->hdl);
self->pfds = g_malloc_n(nfds, sizeof(struct pollfd));
if (self->pfds == NULL) {
dolog("failed to allocate pollfd structures\n");
goto fail;
}
self->pindexes = g_malloc_n(nfds, sizeof(struct pollindex));
if (self->pindexes == NULL) {
dolog("failed to allocate pollindex structures\n");
goto fail;
}
for (i = 0; i < nfds; i++) {
self->pindexes[i].self = self;
self->pindexes[i].index = i;
}
return 0;
fail:
sndio_fini(self);
return -1;
}
/*
 * Start or stop the sndio stream and keep the voice state in sync.
 *
 * The ordering differs on purpose: when enabling, the hardware is
 * started before the enabled flag is set and the poll descriptors are
 * (re)armed; when disabling, the flag is cleared and polling torn down
 * before the hardware is stopped.
 */
static void sndio_enable(SndioVoice *self, bool enable)
{
    if (!enable) {
        self->enabled = false;
        sndio_poll_clear(self);
        sio_stop(self->hdl);
        return;
    }

    sio_start(self->hdl);
    self->enabled = true;
    sndio_poll_wait(self);
}
/* HWVoiceOut enable hook: forward to the shared enable/disable helper. */
static void sndio_enable_out(HWVoiceOut *hw, bool enable)
{
    sndio_enable((SndioVoice *) hw, enable);
}
/* HWVoiceIn enable hook: forward to the shared enable/disable helper. */
static void sndio_enable_in(HWVoiceIn *hw, bool enable)
{
    sndio_enable((SndioVoice *) hw, enable);
}
/*
 * Open and configure a playback voice.
 *
 * Returns 0 on success, or -1 if the sndio device could not be set up
 * for the requested audio settings.
 */
static int sndio_init_out(HWVoiceOut *hw, struct audsettings *as, void *opaque)
{
    SndioVoice *voice = (SndioVoice *) hw;

    if (sndio_init(voice, as, SIO_PLAY, opaque) == -1) {
        return -1;
    }

    audio_pcm_init_info(&hw->info, as);
    /* one device block (par.round frames) per transfer */
    hw->samples = voice->par.round;
    return 0;
}
/*
 * Open and configure a capture voice.
 *
 * Returns 0 on success, or -1 if the sndio device could not be set up
 * for the requested audio settings.
 */
static int sndio_init_in(HWVoiceIn *hw, struct audsettings *as, void *opaque)
{
    SndioVoice *voice = (SndioVoice *) hw;

    if (sndio_init(voice, as, SIO_REC, opaque) == -1) {
        return -1;
    }

    audio_pcm_init_info(&hw->info, as);
    /* one device block (par.round frames) per transfer */
    hw->samples = voice->par.round;
    return 0;
}
/* HWVoiceOut teardown hook: release all resources held by the voice. */
static void sndio_fini_out(HWVoiceOut *hw)
{
    sndio_fini((SndioVoice *) hw);
}
/* HWVoiceIn teardown hook: release all resources held by the voice. */
static void sndio_fini_in(HWVoiceIn *hw)
{
    sndio_fini((SndioVoice *) hw);
}
/*
 * Driver-wide init hook.  No global state is needed, so the audiodev
 * configuration itself is returned as the driver's opaque handle.
 */
static void *sndio_audio_init(Audiodev *dev)
{
    assert(dev->driver == AUDIODEV_DRIVER_SNDIO);
    return dev;
}
/* Driver-wide teardown hook: nothing was allocated in sndio_audio_init. */
static void sndio_audio_fini(void *opaque)
{
}
/*
 * PCM operations for both playback and capture voices.  Reads and
 * writes go through the generic copy helpers, layered on top of the
 * buffer get/put accessors defined above.
 */
static struct audio_pcm_ops sndio_pcm_ops = {
    .init_out = sndio_init_out,
    .fini_out = sndio_fini_out,
    .enable_out = sndio_enable_out,
    .write = audio_generic_write,
    .buffer_get_free = sndio_buffer_get_free,
    .get_buffer_out = sndio_get_buffer_out,
    .put_buffer_out = sndio_put_buffer_out,
    .init_in = sndio_init_in,
    .fini_in = sndio_fini_in,
    .read = audio_generic_read,
    .enable_in = sndio_enable_in,
    .get_buffer_in = sndio_get_buffer_in,
    .put_buffer_in = sndio_put_buffer_in,
};
/* Backend description registered with the QEMU audio subsystem. */
static struct audio_driver sndio_audio_driver = {
    .name = "sndio",
    .descr = "sndio https://sndio.org",
    .init = sndio_audio_init,
    .fini = sndio_audio_fini,
    .pcm_ops = &sndio_pcm_ops,
    .can_be_default = 1,
    /* INT_MAX: no artificial cap on the number of voices */
    .max_voices_out = INT_MAX,
    .max_voices_in = INT_MAX,
    .voice_size_out = sizeof(SndioVoice),
    .voice_size_in = sizeof(SndioVoice)
};
/* Make the sndio backend available; runs at module-init time. */
static void register_audio_sndio(void)
{
    audio_driver_register(&sndio_audio_driver);
}

type_init(register_audio_sndio);

View File

@ -114,14 +114,19 @@ dbus_get_proxies(DBusVMState *self, GError **err)
"org.qemu.VMState1", "org.qemu.VMState1",
NULL, err); NULL, err);
if (!proxy) { if (!proxy) {
return NULL; if (err != NULL && *err != NULL) {
warn_report("%s: Failed to create proxy: %s",
__func__, (*err)->message);
g_clear_error(err);
}
continue;
} }
result = g_dbus_proxy_get_cached_property(proxy, "Id"); result = g_dbus_proxy_get_cached_property(proxy, "Id");
if (!result) { if (!result) {
g_set_error_literal(err, G_IO_ERROR, G_IO_ERROR_FAILED, warn_report("%s: VMState Id property is missing.", __func__);
"VMState Id property is missing."); g_clear_object(&proxy);
return NULL; continue;
} }
id = g_variant_dup_string(result, &size); id = g_variant_dup_string(result, &size);

View File

@ -306,22 +306,12 @@ bool host_memory_backend_is_mapped(HostMemoryBackend *backend)
return backend->is_mapped; return backend->is_mapped;
} }
#ifdef __linux__
size_t host_memory_backend_pagesize(HostMemoryBackend *memdev) size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
{ {
Object *obj = OBJECT(memdev); size_t pagesize = qemu_ram_pagesize(memdev->mr.ram_block);
char *path = object_property_get_str(obj, "mem-path", NULL); g_assert(pagesize >= qemu_real_host_page_size());
size_t pagesize = qemu_mempath_getpagesize(path);
g_free(path);
return pagesize; return pagesize;
} }
#else
size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
{
return qemu_real_host_page_size();
}
#endif
static void static void
host_memory_backend_memory_complete(UserCreatable *uc, Error **errp) host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)

View File

@ -32,8 +32,10 @@
#include "qemu/sockets.h" #include "qemu/sockets.h"
#include "qemu/lockable.h" #include "qemu/lockable.h"
#include "io/channel-socket.h" #include "io/channel-socket.h"
#include "sysemu/runstate.h"
#include "sysemu/tpm_backend.h" #include "sysemu/tpm_backend.h"
#include "sysemu/tpm_util.h" #include "sysemu/tpm_util.h"
#include "sysemu/runstate.h"
#include "tpm_int.h" #include "tpm_int.h"
#include "tpm_ioctl.h" #include "tpm_ioctl.h"
#include "migration/blocker.h" #include "migration/blocker.h"
@ -81,6 +83,9 @@ struct TPMEmulator {
unsigned int established_flag_cached:1; unsigned int established_flag_cached:1;
TPMBlobBuffers state_blobs; TPMBlobBuffers state_blobs;
bool relock_storage;
VMChangeStateEntry *vmstate;
}; };
struct tpm_error { struct tpm_error {
@ -302,6 +307,35 @@ static int tpm_emulator_stop_tpm(TPMBackend *tb)
return 0; return 0;
} }
static int tpm_emulator_lock_storage(TPMEmulator *tpm_emu)
{
ptm_lockstorage pls;
if (!TPM_EMULATOR_IMPLEMENTS_ALL_CAPS(tpm_emu, PTM_CAP_LOCK_STORAGE)) {
trace_tpm_emulator_lock_storage_cmd_not_supt();
return 0;
}
/* give failing side 300 * 10ms time to release lock */
pls.u.req.retries = cpu_to_be32(300);
if (tpm_emulator_ctrlcmd(tpm_emu, CMD_LOCK_STORAGE, &pls,
sizeof(pls.u.req), sizeof(pls.u.resp)) < 0) {
error_report("tpm-emulator: Could not lock storage within 3 seconds: "
"%s", strerror(errno));
return -1;
}
pls.u.resp.tpm_result = be32_to_cpu(pls.u.resp.tpm_result);
if (pls.u.resp.tpm_result != 0) {
error_report("tpm-emulator: TPM result for CMD_LOCK_STORAGE: 0x%x %s",
pls.u.resp.tpm_result,
tpm_emulator_strerror(pls.u.resp.tpm_result));
return -1;
}
return 0;
}
static int tpm_emulator_set_buffer_size(TPMBackend *tb, static int tpm_emulator_set_buffer_size(TPMBackend *tb,
size_t wanted_size, size_t wanted_size,
size_t *actual_size) size_t *actual_size)
@ -383,6 +417,15 @@ err_exit:
static int tpm_emulator_startup_tpm(TPMBackend *tb, size_t buffersize) static int tpm_emulator_startup_tpm(TPMBackend *tb, size_t buffersize)
{ {
/* TPM startup will be done from post_load hook */
if (runstate_check(RUN_STATE_INMIGRATE)) {
if (buffersize != 0) {
return tpm_emulator_set_buffer_size(tb, buffersize, NULL);
}
return 0;
}
return tpm_emulator_startup_tpm_resume(tb, buffersize, false); return tpm_emulator_startup_tpm_resume(tb, buffersize, false);
} }
@ -843,13 +886,34 @@ static int tpm_emulator_pre_save(void *opaque)
{ {
TPMBackend *tb = opaque; TPMBackend *tb = opaque;
TPMEmulator *tpm_emu = TPM_EMULATOR(tb); TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
int ret;
trace_tpm_emulator_pre_save(); trace_tpm_emulator_pre_save();
tpm_backend_finish_sync(tb); tpm_backend_finish_sync(tb);
/* get the state blobs from the TPM */ /* get the state blobs from the TPM */
return tpm_emulator_get_state_blobs(tpm_emu); ret = tpm_emulator_get_state_blobs(tpm_emu);
tpm_emu->relock_storage = ret == 0;
return ret;
}
static void tpm_emulator_vm_state_change(void *opaque, bool running,
RunState state)
{
TPMBackend *tb = opaque;
TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
trace_tpm_emulator_vm_state_change(running, state);
if (!running || state != RUN_STATE_RUNNING || !tpm_emu->relock_storage) {
return;
}
/* lock storage after migration fall-back */
tpm_emulator_lock_storage(tpm_emu);
} }
/* /*
@ -911,6 +975,9 @@ static void tpm_emulator_inst_init(Object *obj)
tpm_emu->options = g_new0(TPMEmulatorOptions, 1); tpm_emu->options = g_new0(TPMEmulatorOptions, 1);
tpm_emu->cur_locty_number = ~0; tpm_emu->cur_locty_number = ~0;
qemu_mutex_init(&tpm_emu->mutex); qemu_mutex_init(&tpm_emu->mutex);
tpm_emu->vmstate =
qemu_add_vm_change_state_handler(tpm_emulator_vm_state_change,
tpm_emu);
vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY,
&vmstate_tpm_emulator, obj); &vmstate_tpm_emulator, obj);
@ -960,6 +1027,7 @@ static void tpm_emulator_inst_finalize(Object *obj)
tpm_sized_buffer_reset(&state_blobs->savestate); tpm_sized_buffer_reset(&state_blobs->savestate);
qemu_mutex_destroy(&tpm_emu->mutex); qemu_mutex_destroy(&tpm_emu->mutex);
qemu_del_vm_change_state_handler(tpm_emu->vmstate);
vmstate_unregister(NULL, &vmstate_tpm_emulator, obj); vmstate_unregister(NULL, &vmstate_tpm_emulator, obj);
} }

View File

@ -5,12 +5,19 @@
* *
* This file is licensed under the terms of the 3-clause BSD license * This file is licensed under the terms of the 3-clause BSD license
*/ */
#ifndef _TPM_IOCTL_H_
#define _TPM_IOCTL_H_
#ifndef TPM_IOCTL_H #if defined(__CYGWIN__)
#define TPM_IOCTL_H # define __USE_LINUX_IOCTL_DEFS
#endif
#include <stdint.h>
#include <sys/types.h>
#ifndef _WIN32
#include <sys/uio.h> #include <sys/uio.h>
#include <sys/ioctl.h> #include <sys/ioctl.h>
#endif
#ifdef HAVE_SYS_IOCCOM_H #ifdef HAVE_SYS_IOCCOM_H
#include <sys/ioccom.h> #include <sys/ioccom.h>
@ -194,6 +201,48 @@ struct ptm_setbuffersize {
} u; } u;
}; };
#define PTM_GETINFO_SIZE (3 * 1024)
/*
* PTM_GET_INFO: Get info about the TPM implementation (from libtpms)
*
* This request allows to indirectly call TPMLIB_GetInfo(flags) and
* retrieve information from libtpms.
* Only one transaction is currently necessary for returning results
* to a client. Therefore, totlength and length will be the same if
* offset is 0.
*/
struct ptm_getinfo {
union {
struct {
uint64_t flags;
uint32_t offset; /* offset from where to read */
uint32_t pad; /* 32 bit arch */
} req; /* request */
struct {
ptm_res tpm_result;
uint32_t totlength;
uint32_t length;
char buffer[PTM_GETINFO_SIZE];
} resp; /* response */
} u;
};
#define SWTPM_INFO_TPMSPECIFICATION ((uint64_t)1 << 0)
#define SWTPM_INFO_TPMATTRIBUTES ((uint64_t)1 << 1)
/*
* PTM_LOCK_STORAGE: Lock the storage and retry n times
*/
struct ptm_lockstorage {
union {
struct {
uint32_t retries; /* number of retries */
} req; /* request */
struct {
ptm_res tpm_result;
} resp; /* reponse */
} u;
};
typedef uint64_t ptm_cap; typedef uint64_t ptm_cap;
typedef struct ptm_est ptm_est; typedef struct ptm_est ptm_est;
@ -205,6 +254,8 @@ typedef struct ptm_getstate ptm_getstate;
typedef struct ptm_setstate ptm_setstate; typedef struct ptm_setstate ptm_setstate;
typedef struct ptm_getconfig ptm_getconfig; typedef struct ptm_getconfig ptm_getconfig;
typedef struct ptm_setbuffersize ptm_setbuffersize; typedef struct ptm_setbuffersize ptm_setbuffersize;
typedef struct ptm_getinfo ptm_getinfo;
typedef struct ptm_lockstorage ptm_lockstorage;
/* capability flags returned by PTM_GET_CAPABILITY */ /* capability flags returned by PTM_GET_CAPABILITY */
#define PTM_CAP_INIT (1) #define PTM_CAP_INIT (1)
@ -221,7 +272,11 @@ typedef struct ptm_setbuffersize ptm_setbuffersize;
#define PTM_CAP_GET_CONFIG (1 << 11) #define PTM_CAP_GET_CONFIG (1 << 11)
#define PTM_CAP_SET_DATAFD (1 << 12) #define PTM_CAP_SET_DATAFD (1 << 12)
#define PTM_CAP_SET_BUFFERSIZE (1 << 13) #define PTM_CAP_SET_BUFFERSIZE (1 << 13)
#define PTM_CAP_GET_INFO (1 << 14)
#define PTM_CAP_SEND_COMMAND_HEADER (1 << 15)
#define PTM_CAP_LOCK_STORAGE (1 << 16)
#ifndef _WIN32
enum { enum {
PTM_GET_CAPABILITY = _IOR('P', 0, ptm_cap), PTM_GET_CAPABILITY = _IOR('P', 0, ptm_cap),
PTM_INIT = _IOWR('P', 1, ptm_init), PTM_INIT = _IOWR('P', 1, ptm_init),
@ -240,7 +295,10 @@ enum {
PTM_GET_CONFIG = _IOR('P', 14, ptm_getconfig), PTM_GET_CONFIG = _IOR('P', 14, ptm_getconfig),
PTM_SET_DATAFD = _IOR('P', 15, ptm_res), PTM_SET_DATAFD = _IOR('P', 15, ptm_res),
PTM_SET_BUFFERSIZE = _IOWR('P', 16, ptm_setbuffersize), PTM_SET_BUFFERSIZE = _IOWR('P', 16, ptm_setbuffersize),
PTM_GET_INFO = _IOWR('P', 17, ptm_getinfo),
PTM_LOCK_STORAGE = _IOWR('P', 18, ptm_lockstorage),
}; };
#endif
/* /*
* Commands used by the non-CUSE TPMs * Commands used by the non-CUSE TPMs
@ -253,23 +311,25 @@ enum {
* and ptm_set_state:u.req.data) are 0xffffffff. * and ptm_set_state:u.req.data) are 0xffffffff.
*/ */
enum { enum {
CMD_GET_CAPABILITY = 1, CMD_GET_CAPABILITY = 1, /* 0x01 */
CMD_INIT, CMD_INIT, /* 0x02 */
CMD_SHUTDOWN, CMD_SHUTDOWN, /* 0x03 */
CMD_GET_TPMESTABLISHED, CMD_GET_TPMESTABLISHED, /* 0x04 */
CMD_SET_LOCALITY, CMD_SET_LOCALITY, /* 0x05 */
CMD_HASH_START, CMD_HASH_START, /* 0x06 */
CMD_HASH_DATA, CMD_HASH_DATA, /* 0x07 */
CMD_HASH_END, CMD_HASH_END, /* 0x08 */
CMD_CANCEL_TPM_CMD, CMD_CANCEL_TPM_CMD, /* 0x09 */
CMD_STORE_VOLATILE, CMD_STORE_VOLATILE, /* 0x0a */
CMD_RESET_TPMESTABLISHED, CMD_RESET_TPMESTABLISHED, /* 0x0b */
CMD_GET_STATEBLOB, CMD_GET_STATEBLOB, /* 0x0c */
CMD_SET_STATEBLOB, CMD_SET_STATEBLOB, /* 0x0d */
CMD_STOP, CMD_STOP, /* 0x0e */
CMD_GET_CONFIG, CMD_GET_CONFIG, /* 0x0f */
CMD_SET_DATAFD, CMD_SET_DATAFD, /* 0x10 */
CMD_SET_BUFFERSIZE, CMD_SET_BUFFERSIZE, /* 0x11 */
CMD_GET_INFO, /* 0x12 */
CMD_LOCK_STORAGE, /* 0x13 */
}; };
#endif /* TPM_IOCTL_H */ #endif /* _TPM_IOCTL_H_ */

View File

@ -20,6 +20,8 @@ tpm_emulator_set_buffer_size(uint32_t buffersize, uint32_t minsize, uint32_t max
tpm_emulator_startup_tpm_resume(bool is_resume, size_t buffersize) "is_resume: %d, buffer size: %zu" tpm_emulator_startup_tpm_resume(bool is_resume, size_t buffersize) "is_resume: %d, buffer size: %zu"
tpm_emulator_get_tpm_established_flag(uint8_t flag) "got established flag: %d" tpm_emulator_get_tpm_established_flag(uint8_t flag) "got established flag: %d"
tpm_emulator_cancel_cmd_not_supt(void) "Backend does not support CANCEL_TPM_CMD" tpm_emulator_cancel_cmd_not_supt(void) "Backend does not support CANCEL_TPM_CMD"
tpm_emulator_lock_storage_cmd_not_supt(void) "Backend does not support LOCK_STORAGE"
tpm_emulator_vm_state_change(int running, int state) "state change to running %d state %d"
tpm_emulator_handle_device_opts_tpm12(void) "TPM Version 1.2" tpm_emulator_handle_device_opts_tpm12(void) "TPM Version 1.2"
tpm_emulator_handle_device_opts_tpm2(void) "TPM Version 2" tpm_emulator_handle_device_opts_tpm2(void) "TPM Version 2"
tpm_emulator_handle_device_opts_unspec(void) "TPM Version Unspecified" tpm_emulator_handle_device_opts_unspec(void) "TPM Version Unspecified"

24
block.c
View File

@ -631,9 +631,10 @@ static int64_t create_file_fallback_truncate(BlockBackend *blk,
* Helper function for bdrv_create_file_fallback(): Zero the first * Helper function for bdrv_create_file_fallback(): Zero the first
* sector to remove any potentially pre-existing image header. * sector to remove any potentially pre-existing image header.
*/ */
static int create_file_fallback_zero_first_sector(BlockBackend *blk, static int coroutine_fn
int64_t current_size, create_file_fallback_zero_first_sector(BlockBackend *blk,
Error **errp) int64_t current_size,
Error **errp)
{ {
int64_t bytes_to_clear; int64_t bytes_to_clear;
int ret; int ret;
@ -4980,8 +4981,8 @@ static void bdrv_close(BlockDriverState *bs)
void bdrv_close_all(void) void bdrv_close_all(void)
{ {
assert(job_next(NULL) == NULL);
GLOBAL_STATE_CODE(); GLOBAL_STATE_CODE();
assert(job_next(NULL) == NULL);
/* Drop references from requests still in flight, such as canceled block /* Drop references from requests still in flight, such as canceled block
* jobs whose AIO context has not been polled yet */ * jobs whose AIO context has not been polled yet */
@ -6167,13 +6168,16 @@ XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp)
} }
} }
for (job = block_job_next(NULL); job; job = block_job_next(job)) { WITH_JOB_LOCK_GUARD() {
GSList *el; for (job = block_job_next_locked(NULL); job;
job = block_job_next_locked(job)) {
GSList *el;
xdbg_graph_add_node(gr, job, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB, xdbg_graph_add_node(gr, job, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB,
job->job.id); job->job.id);
for (el = job->nodes; el; el = el->next) { for (el = job->nodes; el; el = el->next) {
xdbg_graph_add_edge(gr, job, (BdrvChild *)el->data); xdbg_graph_add_edge(gr, job, (BdrvChild *)el->data);
}
} }
} }

View File

@ -38,13 +38,31 @@ void block_acct_init(BlockAcctStats *stats)
if (qtest_enabled()) { if (qtest_enabled()) {
clock_type = QEMU_CLOCK_VIRTUAL; clock_type = QEMU_CLOCK_VIRTUAL;
} }
stats->account_invalid = true;
stats->account_failed = true;
} }
void block_acct_setup(BlockAcctStats *stats, bool account_invalid, static bool bool_from_onoffauto(OnOffAuto val, bool def)
bool account_failed)
{ {
stats->account_invalid = account_invalid; switch (val) {
stats->account_failed = account_failed; case ON_OFF_AUTO_AUTO:
return def;
case ON_OFF_AUTO_ON:
return true;
case ON_OFF_AUTO_OFF:
return false;
default:
abort();
}
}
void block_acct_setup(BlockAcctStats *stats, enum OnOffAuto account_invalid,
enum OnOffAuto account_failed)
{
stats->account_invalid = bool_from_onoffauto(account_invalid,
stats->account_invalid);
stats->account_failed = bool_from_onoffauto(account_failed,
stats->account_failed);
} }
void block_acct_cleanup(BlockAcctStats *stats) void block_acct_cleanup(BlockAcctStats *stats)

View File

@ -258,7 +258,7 @@ blkverify_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
return blkverify_co_prwv(bs, &r, offset, bytes, qiov, qiov, flags, true); return blkverify_co_prwv(bs, &r, offset, bytes, qiov, qiov, flags, true);
} }
static int blkverify_co_flush(BlockDriverState *bs) static int coroutine_fn blkverify_co_flush(BlockDriverState *bs)
{ {
BDRVBlkverifyState *s = bs->opaque; BDRVBlkverifyState *s = bs->opaque;

View File

@ -1546,7 +1546,7 @@ static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset,
return &acb->common; return &acb->common;
} }
static void blk_aio_read_entry(void *opaque) static void coroutine_fn blk_aio_read_entry(void *opaque)
{ {
BlkAioEmAIOCB *acb = opaque; BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco; BlkRwCo *rwco = &acb->rwco;
@ -1558,7 +1558,7 @@ static void blk_aio_read_entry(void *opaque)
blk_aio_complete(acb); blk_aio_complete(acb);
} }
static void blk_aio_write_entry(void *opaque) static void coroutine_fn blk_aio_write_entry(void *opaque)
{ {
BlkAioEmAIOCB *acb = opaque; BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco; BlkRwCo *rwco = &acb->rwco;
@ -1669,7 +1669,7 @@ int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req,
return ret; return ret;
} }
static void blk_aio_ioctl_entry(void *opaque) static void coroutine_fn blk_aio_ioctl_entry(void *opaque)
{ {
BlkAioEmAIOCB *acb = opaque; BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco; BlkRwCo *rwco = &acb->rwco;
@ -1703,7 +1703,7 @@ blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes)
return bdrv_co_pdiscard(blk->root, offset, bytes); return bdrv_co_pdiscard(blk->root, offset, bytes);
} }
static void blk_aio_pdiscard_entry(void *opaque) static void coroutine_fn blk_aio_pdiscard_entry(void *opaque)
{ {
BlkAioEmAIOCB *acb = opaque; BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco; BlkRwCo *rwco = &acb->rwco;
@ -1747,7 +1747,7 @@ static int coroutine_fn blk_co_do_flush(BlockBackend *blk)
return bdrv_co_flush(blk_bs(blk)); return bdrv_co_flush(blk_bs(blk));
} }
static void blk_aio_flush_entry(void *opaque) static void coroutine_fn blk_aio_flush_entry(void *opaque)
{ {
BlkAioEmAIOCB *acb = opaque; BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco; BlkRwCo *rwco = &acb->rwco;

View File

@ -203,9 +203,9 @@ static int coroutine_fn cbw_co_flush(BlockDriverState *bs)
* It's guaranteed that guest writes will not interact in the region until * It's guaranteed that guest writes will not interact in the region until
* cbw_snapshot_read_unlock() called. * cbw_snapshot_read_unlock() called.
*/ */
static BlockReq *cbw_snapshot_read_lock(BlockDriverState *bs, static coroutine_fn BlockReq *
int64_t offset, int64_t bytes, cbw_snapshot_read_lock(BlockDriverState *bs, int64_t offset, int64_t bytes,
int64_t *pnum, BdrvChild **file) int64_t *pnum, BdrvChild **file)
{ {
BDRVCopyBeforeWriteState *s = bs->opaque; BDRVCopyBeforeWriteState *s = bs->opaque;
BlockReq *req = g_new(BlockReq, 1); BlockReq *req = g_new(BlockReq, 1);
@ -240,7 +240,8 @@ static BlockReq *cbw_snapshot_read_lock(BlockDriverState *bs,
return req; return req;
} }
static void cbw_snapshot_read_unlock(BlockDriverState *bs, BlockReq *req) static coroutine_fn void
cbw_snapshot_read_unlock(BlockDriverState *bs, BlockReq *req)
{ {
BDRVCopyBeforeWriteState *s = bs->opaque; BDRVCopyBeforeWriteState *s = bs->opaque;

View File

@ -855,7 +855,7 @@ out_noclean:
return -EINVAL; return -EINVAL;
} }
static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb) static void coroutine_fn curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
{ {
CURLState *state; CURLState *state;
int running; int running;

View File

@ -154,7 +154,6 @@ typedef struct BDRVRawState {
bool has_discard:1; bool has_discard:1;
bool has_write_zeroes:1; bool has_write_zeroes:1;
bool discard_zeroes:1;
bool use_linux_aio:1; bool use_linux_aio:1;
bool use_linux_io_uring:1; bool use_linux_io_uring:1;
int page_cache_inconsistent; /* errno from fdatasync failure */ int page_cache_inconsistent; /* errno from fdatasync failure */
@ -755,7 +754,6 @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
ret = -EINVAL; ret = -EINVAL;
goto fail; goto fail;
} else { } else {
s->discard_zeroes = true;
s->has_fallocate = true; s->has_fallocate = true;
} }
} else { } else {
@ -769,19 +767,12 @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
} }
if (S_ISBLK(st.st_mode)) { if (S_ISBLK(st.st_mode)) {
#ifdef BLKDISCARDZEROES
unsigned int arg;
if (ioctl(s->fd, BLKDISCARDZEROES, &arg) == 0 && arg) {
s->discard_zeroes = true;
}
#endif
#ifdef __linux__ #ifdef __linux__
/* On Linux 3.10, BLKDISCARD leaves stale data in the page cache. Do /* On Linux 3.10, BLKDISCARD leaves stale data in the page cache. Do
* not rely on the contents of discarded blocks unless using O_DIRECT. * not rely on the contents of discarded blocks unless using O_DIRECT.
* Same for BLKZEROOUT. * Same for BLKZEROOUT.
*/ */
if (!(bs->open_flags & BDRV_O_NOCACHE)) { if (!(bs->open_flags & BDRV_O_NOCACHE)) {
s->discard_zeroes = false;
s->has_write_zeroes = false; s->has_write_zeroes = false;
} }
#endif #endif
@ -1295,7 +1286,7 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
} }
#endif #endif
if (bs->sg || S_ISBLK(st.st_mode)) { if (bdrv_is_sg(bs) || S_ISBLK(st.st_mode)) {
int ret = hdev_get_max_hw_transfer(s->fd, &st); int ret = hdev_get_max_hw_transfer(s->fd, &st);
if (ret > 0 && ret <= BDRV_REQUEST_MAX_BYTES) { if (ret > 0 && ret <= BDRV_REQUEST_MAX_BYTES) {
@ -2061,6 +2052,28 @@ static int coroutine_fn raw_thread_pool_submit(BlockDriverState *bs,
return thread_pool_submit_co(pool, func, arg); return thread_pool_submit_co(pool, func, arg);
} }
/*
* Check if all memory in this vector is sector aligned.
*/
static bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
int i;
size_t alignment = bdrv_min_mem_align(bs);
size_t len = bs->bl.request_alignment;
IO_CODE();
for (i = 0; i < qiov->niov; i++) {
if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
return false;
}
if (qiov->iov[i].iov_len % len) {
return false;
}
}
return true;
}
static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov, int type) uint64_t bytes, QEMUIOVector *qiov, int type)
{ {
@ -2158,7 +2171,7 @@ static void raw_aio_unplug(BlockDriverState *bs)
#endif #endif
} }
static int raw_co_flush_to_disk(BlockDriverState *bs) static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
{ {
BDRVRawState *s = bs->opaque; BDRVRawState *s = bs->opaque;
RawPosixAIOData acb; RawPosixAIOData acb;

View File

@ -1555,7 +1555,6 @@ static BlockDriver bdrv_gluster = {
.format_name = "gluster", .format_name = "gluster",
.protocol_name = "gluster", .protocol_name = "gluster",
.instance_size = sizeof(BDRVGlusterState), .instance_size = sizeof(BDRVGlusterState),
.bdrv_needs_filename = false,
.bdrv_file_open = qemu_gluster_open, .bdrv_file_open = qemu_gluster_open,
.bdrv_reopen_prepare = qemu_gluster_reopen_prepare, .bdrv_reopen_prepare = qemu_gluster_reopen_prepare,
.bdrv_reopen_commit = qemu_gluster_reopen_commit, .bdrv_reopen_commit = qemu_gluster_reopen_commit,
@ -1585,7 +1584,6 @@ static BlockDriver bdrv_gluster_tcp = {
.format_name = "gluster", .format_name = "gluster",
.protocol_name = "gluster+tcp", .protocol_name = "gluster+tcp",
.instance_size = sizeof(BDRVGlusterState), .instance_size = sizeof(BDRVGlusterState),
.bdrv_needs_filename = false,
.bdrv_file_open = qemu_gluster_open, .bdrv_file_open = qemu_gluster_open,
.bdrv_reopen_prepare = qemu_gluster_reopen_prepare, .bdrv_reopen_prepare = qemu_gluster_reopen_prepare,
.bdrv_reopen_commit = qemu_gluster_reopen_commit, .bdrv_reopen_commit = qemu_gluster_reopen_commit,
@ -1615,7 +1613,6 @@ static BlockDriver bdrv_gluster_unix = {
.format_name = "gluster", .format_name = "gluster",
.protocol_name = "gluster+unix", .protocol_name = "gluster+unix",
.instance_size = sizeof(BDRVGlusterState), .instance_size = sizeof(BDRVGlusterState),
.bdrv_needs_filename = true,
.bdrv_file_open = qemu_gluster_open, .bdrv_file_open = qemu_gluster_open,
.bdrv_reopen_prepare = qemu_gluster_reopen_prepare, .bdrv_reopen_prepare = qemu_gluster_reopen_prepare,
.bdrv_reopen_commit = qemu_gluster_reopen_commit, .bdrv_reopen_commit = qemu_gluster_reopen_commit,
@ -1651,7 +1648,6 @@ static BlockDriver bdrv_gluster_rdma = {
.format_name = "gluster", .format_name = "gluster",
.protocol_name = "gluster+rdma", .protocol_name = "gluster+rdma",
.instance_size = sizeof(BDRVGlusterState), .instance_size = sizeof(BDRVGlusterState),
.bdrv_needs_filename = true,
.bdrv_file_open = qemu_gluster_open, .bdrv_file_open = qemu_gluster_open,
.bdrv_reopen_prepare = qemu_gluster_reopen_prepare, .bdrv_reopen_prepare = qemu_gluster_reopen_prepare,
.bdrv_reopen_commit = qemu_gluster_reopen_commit, .bdrv_reopen_commit = qemu_gluster_reopen_commit,

View File

@ -751,11 +751,11 @@ static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
/** /**
* Add an active request to the tracked requests list * Add an active request to the tracked requests list
*/ */
static void tracked_request_begin(BdrvTrackedRequest *req, static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
BlockDriverState *bs, BlockDriverState *bs,
int64_t offset, int64_t offset,
int64_t bytes, int64_t bytes,
enum BdrvTrackedRequestType type) enum BdrvTrackedRequestType type)
{ {
bdrv_check_request(offset, bytes, &error_abort); bdrv_check_request(offset, bytes, &error_abort);
@ -794,7 +794,7 @@ static bool tracked_request_overlaps(BdrvTrackedRequest *req,
} }
/* Called with self->bs->reqs_lock held */ /* Called with self->bs->reqs_lock held */
static BdrvTrackedRequest * static coroutine_fn BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self) bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{ {
BdrvTrackedRequest *req; BdrvTrackedRequest *req;
@ -828,20 +828,16 @@ bdrv_find_conflicting_request(BdrvTrackedRequest *self)
} }
/* Called with self->bs->reqs_lock held */ /* Called with self->bs->reqs_lock held */
static bool coroutine_fn static void coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self) bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{ {
BdrvTrackedRequest *req; BdrvTrackedRequest *req;
bool waited = false;
while ((req = bdrv_find_conflicting_request(self))) { while ((req = bdrv_find_conflicting_request(self))) {
self->waiting_for = req; self->waiting_for = req;
qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock); qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
self->waiting_for = NULL; self->waiting_for = NULL;
waited = true;
} }
return waited;
} }
/* Called with req->bs->reqs_lock held */ /* Called with req->bs->reqs_lock held */
@ -934,36 +930,31 @@ void bdrv_dec_in_flight(BlockDriverState *bs)
bdrv_wakeup(bs); bdrv_wakeup(bs);
} }
static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self) static void coroutine_fn
bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{ {
BlockDriverState *bs = self->bs; BlockDriverState *bs = self->bs;
bool waited = false;
if (!qatomic_read(&bs->serialising_in_flight)) { if (!qatomic_read(&bs->serialising_in_flight)) {
return false; return;
} }
qemu_co_mutex_lock(&bs->reqs_lock); qemu_co_mutex_lock(&bs->reqs_lock);
waited = bdrv_wait_serialising_requests_locked(self); bdrv_wait_serialising_requests_locked(self);
qemu_co_mutex_unlock(&bs->reqs_lock); qemu_co_mutex_unlock(&bs->reqs_lock);
return waited;
} }
bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req, void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
uint64_t align) uint64_t align)
{ {
bool waited;
IO_CODE(); IO_CODE();
qemu_co_mutex_lock(&req->bs->reqs_lock); qemu_co_mutex_lock(&req->bs->reqs_lock);
tracked_request_set_serialising(req, align); tracked_request_set_serialising(req, align);
waited = bdrv_wait_serialising_requests_locked(req); bdrv_wait_serialising_requests_locked(req);
qemu_co_mutex_unlock(&req->bs->reqs_lock); qemu_co_mutex_unlock(&req->bs->reqs_lock);
return waited;
} }
int bdrv_check_qiov_request(int64_t offset, int64_t bytes, int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
@ -1644,10 +1635,10 @@ static bool bdrv_init_padding(BlockDriverState *bs,
return true; return true;
} }
static int bdrv_padding_rmw_read(BdrvChild *child, static coroutine_fn int bdrv_padding_rmw_read(BdrvChild *child,
BdrvTrackedRequest *req, BdrvTrackedRequest *req,
BdrvRequestPadding *pad, BdrvRequestPadding *pad,
bool zero_middle) bool zero_middle)
{ {
QEMUIOVector local_qiov; QEMUIOVector local_qiov;
BlockDriverState *bs = child->bs; BlockDriverState *bs = child->bs;
@ -3168,7 +3159,7 @@ out:
return ret; return ret;
} }
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{ {
BlockDriver *drv = bs->drv; BlockDriver *drv = bs->drv;
CoroutineIOCompletion co = { CoroutineIOCompletion co = {
@ -3236,27 +3227,6 @@ void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
return mem; return mem;
} }
/*
* Check if all memory in this vector is sector aligned.
*/
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
int i;
size_t alignment = bdrv_min_mem_align(bs);
IO_CODE();
for (i = 0; i < qiov->niov; i++) {
if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
return false;
}
if (qiov->iov[i].iov_len % alignment) {
return false;
}
}
return true;
}
void bdrv_io_plug(BlockDriverState *bs) void bdrv_io_plug(BlockDriverState *bs)
{ {
BdrvChild *child; BdrvChild *child;

View File

@ -11,6 +11,7 @@
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include <liburing.h> #include <liburing.h>
#include "block/aio.h" #include "block/aio.h"
#include "qemu/error-report.h"
#include "qemu/queue.h" #include "qemu/queue.h"
#include "block/block.h" #include "block/block.h"
#include "block/raw-aio.h" #include "block/raw-aio.h"

View File

@ -290,7 +290,8 @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
} }
} }
static void iscsi_co_init_iscsitask(IscsiLun *iscsilun, struct IscsiTask *iTask) static void coroutine_fn
iscsi_co_init_iscsitask(IscsiLun *iscsilun, struct IscsiTask *iTask)
{ {
*iTask = (struct IscsiTask) { *iTask = (struct IscsiTask) {
.co = qemu_coroutine_self(), .co = qemu_coroutine_self(),
@ -2065,7 +2066,7 @@ static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp)
uint64_t max_xfer_len = iscsilun->use_16_for_rw ? 0xffffffff : 0xffff; uint64_t max_xfer_len = iscsilun->use_16_for_rw ? 0xffffffff : 0xffff;
unsigned int block_size = MAX(BDRV_SECTOR_SIZE, iscsilun->block_size); unsigned int block_size = MAX(BDRV_SECTOR_SIZE, iscsilun->block_size);
assert(iscsilun->block_size >= BDRV_SECTOR_SIZE || bs->sg); assert(iscsilun->block_size >= BDRV_SECTOR_SIZE || bdrv_is_sg(bs));
bs->bl.request_alignment = block_size; bs->bl.request_alignment = block_size;

View File

@ -461,7 +461,7 @@ LinuxAioState *laio_init(Error **errp)
s = g_malloc0(sizeof(*s)); s = g_malloc0(sizeof(*s));
rc = event_notifier_init(&s->e, false); rc = event_notifier_init(&s->e, false);
if (rc < 0) { if (rc < 0) {
error_setg_errno(errp, -rc, "failed to to initialize event notifier"); error_setg_errno(errp, -rc, "failed to initialize event notifier");
goto out_free_state; goto out_free_state;
} }

View File

@ -894,6 +894,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
BlockDriverState *bs = s->mirror_top_bs->backing->bs; BlockDriverState *bs = s->mirror_top_bs->backing->bs;
BlockDriverState *target_bs = blk_bs(s->target); BlockDriverState *target_bs = blk_bs(s->target);
bool need_drain = true; bool need_drain = true;
BlockDeviceIoStatus iostatus;
int64_t length; int64_t length;
int64_t target_length; int64_t target_length;
BlockDriverInfo bdi; BlockDriverInfo bdi;
@ -1016,8 +1017,11 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
* We do so every BLKOCK_JOB_SLICE_TIME nanoseconds, or when there is * We do so every BLKOCK_JOB_SLICE_TIME nanoseconds, or when there is
* an error, or when the source is clean, whichever comes first. */ * an error, or when the source is clean, whichever comes first. */
delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns; delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
WITH_JOB_LOCK_GUARD() {
iostatus = s->common.iostatus;
}
if (delta < BLOCK_JOB_SLICE_TIME && if (delta < BLOCK_JOB_SLICE_TIME &&
s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) { iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 || if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
(cnt == 0 && s->in_flight > 0)) { (cnt == 0 && s->in_flight > 0)) {
trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight); trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
@ -1152,8 +1156,10 @@ static void mirror_complete(Job *job, Error **errp)
s->should_complete = true; s->should_complete = true;
/* If the job is paused, it will be re-entered when it is resumed */ /* If the job is paused, it will be re-entered when it is resumed */
if (!job->paused) { WITH_JOB_LOCK_GUARD() {
job_enter(job); if (!job->paused) {
job_enter_cond_locked(job, NULL);
}
} }
} }
@ -1173,8 +1179,11 @@ static bool mirror_drained_poll(BlockJob *job)
* from one of our own drain sections, to avoid a deadlock waiting for * from one of our own drain sections, to avoid a deadlock waiting for
* ourselves. * ourselves.
*/ */
if (!s->common.job.paused && !job_is_cancelled(&job->job) && !s->in_drain) { WITH_JOB_LOCK_GUARD() {
return true; if (!s->common.job.paused && !job_is_cancelled_locked(&job->job)
&& !s->in_drain) {
return true;
}
} }
return !!s->in_flight; return !!s->in_flight;

View File

@ -638,16 +638,16 @@ static void print_block_info(Monitor *mon, BlockInfo *info,
assert(!info || !info->has_inserted || info->inserted == inserted); assert(!info || !info->has_inserted || info->inserted == inserted);
if (info && *info->device) { if (info && *info->device) {
monitor_printf(mon, "%s", info->device); monitor_puts(mon, info->device);
if (inserted && inserted->has_node_name) { if (inserted && inserted->has_node_name) {
monitor_printf(mon, " (%s)", inserted->node_name); monitor_printf(mon, " (%s)", inserted->node_name);
} }
} else { } else {
assert(info || inserted); assert(info || inserted);
monitor_printf(mon, "%s", monitor_puts(mon,
inserted && inserted->has_node_name ? inserted->node_name inserted && inserted->has_node_name ? inserted->node_name
: info && info->has_qdev ? info->qdev : info && info->has_qdev ? info->qdev
: "<anonymous>"); : "<anonymous>");
} }
if (inserted) { if (inserted) {

View File

@ -983,11 +983,12 @@ static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
* nbd_reply_chunk_iter_receive * nbd_reply_chunk_iter_receive
* The pointer stored in @payload requires g_free() to free it. * The pointer stored in @payload requires g_free() to free it.
*/ */
static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s, static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s,
NBDReplyChunkIter *iter, NBDReplyChunkIter *iter,
uint64_t handle, uint64_t handle,
QEMUIOVector *qiov, NBDReply *reply, QEMUIOVector *qiov,
void **payload) NBDReply *reply,
void **payload)
{ {
int ret, request_ret; int ret, request_ret;
NBDReply local_reply; NBDReply local_reply;

View File

@ -223,7 +223,7 @@ static void nfs_process_write(void *arg)
qemu_mutex_unlock(&client->mutex); qemu_mutex_unlock(&client->mutex);
} }
static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task) static void coroutine_fn nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
{ {
*task = (NFSRPC) { *task = (NFSRPC) {
.co = qemu_coroutine_self(), .co = qemu_coroutine_self(),

View File

@ -293,34 +293,42 @@ static void nvme_kick(NVMeQueuePair *q)
q->need_kick = 0; q->need_kick = 0;
} }
/* Find a free request element if any, otherwise: static NVMeRequest *nvme_get_free_req_nofail_locked(NVMeQueuePair *q)
* a) if in coroutine context, try to wait for one to become available;
* b) if not in coroutine, return NULL;
*/
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{ {
NVMeRequest *req; NVMeRequest *req;
qemu_mutex_lock(&q->lock);
while (q->free_req_head == -1) {
if (qemu_in_coroutine()) {
trace_nvme_free_req_queue_wait(q->s, q->index);
qemu_co_queue_wait(&q->free_req_queue, &q->lock);
} else {
qemu_mutex_unlock(&q->lock);
return NULL;
}
}
req = &q->reqs[q->free_req_head]; req = &q->reqs[q->free_req_head];
q->free_req_head = req->free_req_next; q->free_req_head = req->free_req_next;
req->free_req_next = -1; req->free_req_next = -1;
qemu_mutex_unlock(&q->lock);
return req; return req;
} }
/* Return a free request element if any, otherwise return NULL. */
static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q)
{
QEMU_LOCK_GUARD(&q->lock);
if (q->free_req_head == -1) {
return NULL;
}
return nvme_get_free_req_nofail_locked(q);
}
/*
* Wait for a free request to become available if necessary, then
* return it.
*/
static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
QEMU_LOCK_GUARD(&q->lock);
while (q->free_req_head == -1) {
trace_nvme_free_req_queue_wait(q->s, q->index);
qemu_co_queue_wait(&q->free_req_queue, &q->lock);
}
return nvme_get_free_req_nofail_locked(q);
}
/* With q->lock */ /* With q->lock */
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req) static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{ {
@ -506,7 +514,7 @@ static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
AioContext *aio_context = bdrv_get_aio_context(bs); AioContext *aio_context = bdrv_get_aio_context(bs);
NVMeRequest *req; NVMeRequest *req;
int ret = -EINPROGRESS; int ret = -EINPROGRESS;
req = nvme_get_free_req(q); req = nvme_get_free_req_nowait(q);
if (!req) { if (!req) {
return -EBUSY; return -EBUSY;
} }
@ -1234,8 +1242,10 @@ static inline bool nvme_qiov_aligned(BlockDriverState *bs,
return true; return true;
} }
static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes, static coroutine_fn int nvme_co_prw(BlockDriverState *bs,
QEMUIOVector *qiov, bool is_write, int flags) uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, bool is_write,
int flags)
{ {
BDRVNVMeState *s = bs->opaque; BDRVNVMeState *s = bs->opaque;
int r; int r;

View File

@ -165,8 +165,9 @@ static int64_t block_status(BDRVParallelsState *s, int64_t sector_num,
return start_off; return start_off;
} }
static int64_t allocate_clusters(BlockDriverState *bs, int64_t sector_num, static coroutine_fn int64_t allocate_clusters(BlockDriverState *bs,
int nb_sectors, int *pnum) int64_t sector_num,
int nb_sectors, int *pnum)
{ {
int ret = 0; int ret = 0;
BDRVParallelsState *s = bs->opaque; BDRVParallelsState *s = bs->opaque;
@ -241,8 +242,8 @@ static int64_t allocate_clusters(BlockDriverState *bs, int64_t sector_num,
return ret; return ret;
} }
ret = bdrv_co_pwritev(bs->file, s->data_end * BDRV_SECTOR_SIZE, ret = bdrv_co_pwrite(bs->file, s->data_end * BDRV_SECTOR_SIZE,
nb_cow_bytes, buf, 0); nb_cow_bytes, buf, 0);
qemu_vfree(buf); qemu_vfree(buf);
if (ret < 0) { if (ret < 0) {
return ret; return ret;

View File

@ -787,10 +787,10 @@ static int bitmap_list_store(BlockDriverState *bs, Qcow2BitmapList *bm_list,
} }
} }
/* Actually, even in in-place case ignoring QCOW2_OL_BITMAP_DIRECTORY is not /* Actually, even in the in-place case ignoring QCOW2_OL_BITMAP_DIRECTORY
* necessary, because we drop QCOW2_AUTOCLEAR_BITMAPS when updating bitmap * is not necessary, because we drop QCOW2_AUTOCLEAR_BITMAPS when updating
* directory in-place (actually, turn-off the extension), which is checked * bitmap directory in-place (actually, turn-off the extension), which is
* in qcow2_check_metadata_overlap() */ * checked in qcow2_check_metadata_overlap() */
ret = qcow2_pre_write_overlap_check( ret = qcow2_pre_write_overlap_check(
bs, in_place ? QCOW2_OL_BITMAP_DIRECTORY : 0, dir_offset, dir_size, bs, in_place ? QCOW2_OL_BITMAP_DIRECTORY : 0, dir_offset, dir_size,
false); false);
@ -1208,7 +1208,7 @@ int qcow2_reopen_bitmaps_rw(BlockDriverState *bs, Error **errp)
} }
} }
g_slist_foreach(ro_dirty_bitmaps, set_readonly_helper, false); g_slist_foreach(ro_dirty_bitmaps, set_readonly_helper, (gpointer)false);
ret = 0; ret = 0;
out: out:

View File

@ -884,7 +884,7 @@ int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
return 0; return 0;
} }
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m) static int coroutine_fn perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{ {
BDRVQcow2State *s = bs->opaque; BDRVQcow2State *s = bs->opaque;
Qcow2COWRegion *start = &m->cow_start; Qcow2COWRegion *start = &m->cow_start;
@ -1024,7 +1024,8 @@ fail:
return ret; return ret;
} }
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m) int coroutine_fn qcow2_alloc_cluster_link_l2(BlockDriverState *bs,
QCowL2Meta *m)
{ {
BDRVQcow2State *s = bs->opaque; BDRVQcow2State *s = bs->opaque;
int i, j = 0, l2_index, ret; int i, j = 0, l2_index, ret;
@ -1397,8 +1398,9 @@ static int count_single_write_clusters(BlockDriverState *bs, int nb_clusters,
* information on cluster allocation may be invalid now. The caller * information on cluster allocation may be invalid now. The caller
* must start over anyway, so consider *cur_bytes undefined. * must start over anyway, so consider *cur_bytes undefined.
*/ */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset, static int coroutine_fn handle_dependencies(BlockDriverState *bs,
uint64_t *cur_bytes, QCowL2Meta **m) uint64_t guest_offset,
uint64_t *cur_bytes, QCowL2Meta **m)
{ {
BDRVQcow2State *s = bs->opaque; BDRVQcow2State *s = bs->opaque;
QCowL2Meta *old_alloc; QCowL2Meta *old_alloc;
@ -1772,9 +1774,10 @@ out:
* *
* Return 0 on success and -errno in error cases * Return 0 on success and -errno in error cases
*/ */
int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset, int coroutine_fn qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes, uint64_t *host_offset, unsigned int *bytes,
QCowL2Meta **m) uint64_t *host_offset,
QCowL2Meta **m)
{ {
BDRVQcow2State *s = bs->opaque; BDRVQcow2State *s = bs->opaque;
uint64_t start, remaining; uint64_t start, remaining;
@ -2105,8 +2108,8 @@ out:
return ret; return ret;
} }
int qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, int coroutine_fn qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, int flags) uint64_t bytes, int flags)
{ {
BDRVQcow2State *s = bs->opaque; BDRVQcow2State *s = bs->opaque;
uint64_t end_offset = offset + bytes; uint64_t end_offset = offset + bytes;

View File

@ -1206,7 +1206,7 @@ void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
} }
} }
int coroutine_fn qcow2_write_caches(BlockDriverState *bs) int qcow2_write_caches(BlockDriverState *bs)
{ {
BDRVQcow2State *s = bs->opaque; BDRVQcow2State *s = bs->opaque;
int ret; int ret;
@ -1226,7 +1226,7 @@ int coroutine_fn qcow2_write_caches(BlockDriverState *bs)
return 0; return 0;
} }
int coroutine_fn qcow2_flush_caches(BlockDriverState *bs) int qcow2_flush_caches(BlockDriverState *bs)
{ {
int ret = qcow2_write_caches(bs); int ret = qcow2_write_caches(bs);
if (ret < 0) { if (ret < 0) {
@ -3706,7 +3706,7 @@ int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size)
return -EIO; return -EIO;
} }
int qcow2_detect_metadata_preallocation(BlockDriverState *bs) int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs)
{ {
BDRVQcow2State *s = bs->opaque; BDRVQcow2State *s = bs->opaque;
int64_t i, end_cluster, cluster_count = 0, threshold; int64_t i, end_cluster, cluster_count = 0, threshold;

View File

@ -275,6 +275,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
if (ret < 0) { if (ret < 0) {
error_setg_errno(errp, -ret, "ERROR: ext_feature_table: " error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
"Could not read table"); "Could not read table");
g_free(feature_table);
return ret; return ret;
} }
@ -1696,16 +1697,27 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
ret = -EINVAL; ret = -EINVAL;
goto fail; goto fail;
} }
s->image_backing_file = g_malloc(len + 1);
ret = bdrv_pread(bs->file, header.backing_file_offset, len, ret = bdrv_pread(bs->file, header.backing_file_offset, len,
bs->auto_backing_file, 0); s->image_backing_file, 0);
if (ret < 0) { if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read backing file name"); error_setg_errno(errp, -ret, "Could not read backing file name");
goto fail; goto fail;
} }
bs->auto_backing_file[len] = '\0'; s->image_backing_file[len] = '\0';
pstrcpy(bs->backing_file, sizeof(bs->backing_file),
bs->auto_backing_file); /*
s->image_backing_file = g_strdup(bs->auto_backing_file); * Update only when something has changed. This function is called by
* qcow2_co_invalidate_cache(), and we do not want to reset
* auto_backing_file unless necessary.
*/
if (!g_str_equal(s->image_backing_file, bs->backing_file)) {
pstrcpy(bs->backing_file, sizeof(bs->backing_file),
s->image_backing_file);
pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
s->image_backing_file);
}
} }
/* /*
@ -2436,7 +2448,7 @@ static bool merge_cow(uint64_t offset, unsigned bytes,
* Return 1 if the COW regions read as zeroes, 0 if not, < 0 on error. * Return 1 if the COW regions read as zeroes, 0 if not, < 0 on error.
* Note that returning 0 does not guarantee non-zero data. * Note that returning 0 does not guarantee non-zero data.
*/ */
static int is_zero_cow(BlockDriverState *bs, QCowL2Meta *m) static int coroutine_fn is_zero_cow(BlockDriverState *bs, QCowL2Meta *m)
{ {
/* /*
* This check is designed for optimization shortcut so it must be * This check is designed for optimization shortcut so it must be
@ -2454,7 +2466,8 @@ static int is_zero_cow(BlockDriverState *bs, QCowL2Meta *m)
m->cow_end.nb_bytes); m->cow_end.nb_bytes);
} }
static int handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta) static int coroutine_fn handle_alloc_space(BlockDriverState *bs,
QCowL2Meta *l2meta)
{ {
BDRVQcow2State *s = bs->opaque; BDRVQcow2State *s = bs->opaque;
QCowL2Meta *m; QCowL2Meta *m;

View File

@ -874,8 +874,8 @@ void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
int qcow2_update_snapshot_refcount(BlockDriverState *bs, int qcow2_update_snapshot_refcount(BlockDriverState *bs,
int64_t l1_table_offset, int l1_size, int addend); int64_t l1_table_offset, int l1_size, int addend);
int coroutine_fn qcow2_flush_caches(BlockDriverState *bs); int qcow2_flush_caches(BlockDriverState *bs);
int coroutine_fn qcow2_write_caches(BlockDriverState *bs); int qcow2_write_caches(BlockDriverState *bs);
int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix); BdrvCheckMode fix);
@ -895,7 +895,7 @@ int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
void *cb_opaque, Error **errp); void *cb_opaque, Error **errp);
int qcow2_shrink_reftable(BlockDriverState *bs); int qcow2_shrink_reftable(BlockDriverState *bs);
int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size); int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size);
int qcow2_detect_metadata_preallocation(BlockDriverState *bs); int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs);
/* qcow2-cluster.c functions */ /* qcow2-cluster.c functions */
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size, int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
@ -908,9 +908,9 @@ int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset, int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes, uint64_t *host_offset, unsigned int *bytes, uint64_t *host_offset,
QCow2SubclusterType *subcluster_type); QCow2SubclusterType *subcluster_type);
int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset, int coroutine_fn qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes, uint64_t *host_offset, unsigned int *bytes,
QCowL2Meta **m); uint64_t *host_offset, QCowL2Meta **m);
int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
uint64_t offset, uint64_t offset,
int compressed_size, int compressed_size,
@ -918,13 +918,14 @@ int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry, void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
uint64_t *coffset, int *csize); uint64_t *coffset, int *csize);
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m); int coroutine_fn qcow2_alloc_cluster_link_l2(BlockDriverState *bs,
QCowL2Meta *m);
void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m); void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m);
int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset, int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, enum qcow2_discard_type type, uint64_t bytes, enum qcow2_discard_type type,
bool full_discard); bool full_discard);
int qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, int coroutine_fn qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, int flags); uint64_t bytes, int flags);
int qcow2_expand_zero_clusters(BlockDriverState *bs, int qcow2_expand_zero_clusters(BlockDriverState *bs,
BlockDriverAmendStatusCB *status_cb, BlockDriverAmendStatusCB *status_cb,

View File

@ -254,7 +254,7 @@ static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
return l2_table; return l2_table;
} }
static bool qed_plug_allocating_write_reqs(BDRVQEDState *s) static bool coroutine_fn qed_plug_allocating_write_reqs(BDRVQEDState *s)
{ {
qemu_co_mutex_lock(&s->table_lock); qemu_co_mutex_lock(&s->table_lock);
@ -273,7 +273,7 @@ static bool qed_plug_allocating_write_reqs(BDRVQEDState *s)
return true; return true;
} }
static void qed_unplug_allocating_write_reqs(BDRVQEDState *s) static void coroutine_fn qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{ {
qemu_co_mutex_lock(&s->table_lock); qemu_co_mutex_lock(&s->table_lock);
assert(s->allocating_write_reqs_plugged); assert(s->allocating_write_reqs_plugged);
@ -445,6 +445,8 @@ static int coroutine_fn bdrv_qed_do_open(BlockDriverState *bs, QDict *options,
} }
if ((s->header.features & QED_F_BACKING_FILE)) { if ((s->header.features & QED_F_BACKING_FILE)) {
g_autofree char *backing_file_str = NULL;
if ((uint64_t)s->header.backing_filename_offset + if ((uint64_t)s->header.backing_filename_offset +
s->header.backing_filename_size > s->header.backing_filename_size >
s->header.cluster_size * s->header.header_size) { s->header.cluster_size * s->header.header_size) {
@ -452,16 +454,21 @@ static int coroutine_fn bdrv_qed_do_open(BlockDriverState *bs, QDict *options,
return -EINVAL; return -EINVAL;
} }
backing_file_str = g_malloc(sizeof(bs->backing_file));
ret = qed_read_string(bs->file, s->header.backing_filename_offset, ret = qed_read_string(bs->file, s->header.backing_filename_offset,
s->header.backing_filename_size, s->header.backing_filename_size,
bs->auto_backing_file, backing_file_str, sizeof(bs->backing_file));
sizeof(bs->auto_backing_file));
if (ret < 0) { if (ret < 0) {
error_setg(errp, "Failed to read backing filename"); error_setg(errp, "Failed to read backing filename");
return ret; return ret;
} }
pstrcpy(bs->backing_file, sizeof(bs->backing_file),
bs->auto_backing_file); if (!g_str_equal(backing_file_str, bs->backing_file)) {
pstrcpy(bs->backing_file, sizeof(bs->backing_file),
backing_file_str);
pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
backing_file_str);
}
if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) { if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw"); pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");

View File

@ -161,11 +161,10 @@ static bool quorum_64bits_compare(QuorumVoteValue *a, QuorumVoteValue *b)
return a->l == b->l; return a->l == b->l;
} }
static QuorumAIOCB *quorum_aio_get(BlockDriverState *bs, static QuorumAIOCB *coroutine_fn quorum_aio_get(BlockDriverState *bs,
QEMUIOVector *qiov, QEMUIOVector *qiov,
uint64_t offset, uint64_t offset, uint64_t bytes,
uint64_t bytes, int flags)
int flags)
{ {
BDRVQuorumState *s = bs->opaque; BDRVQuorumState *s = bs->opaque;
QuorumAIOCB *acb = g_new(QuorumAIOCB, 1); QuorumAIOCB *acb = g_new(QuorumAIOCB, 1);
@ -233,8 +232,6 @@ static bool quorum_has_too_much_io_failed(QuorumAIOCB *acb)
return false; return false;
} }
static int read_fifo_child(QuorumAIOCB *acb);
static void quorum_copy_qiov(QEMUIOVector *dest, QEMUIOVector *source) static void quorum_copy_qiov(QEMUIOVector *dest, QEMUIOVector *source)
{ {
int i; int i;
@ -273,7 +270,7 @@ static void quorum_report_bad_versions(BDRVQuorumState *s,
} }
} }
static void quorum_rewrite_entry(void *opaque) static void coroutine_fn quorum_rewrite_entry(void *opaque)
{ {
QuorumCo *co = opaque; QuorumCo *co = opaque;
QuorumAIOCB *acb = co->acb; QuorumAIOCB *acb = co->acb;
@ -574,7 +571,7 @@ free_exit:
quorum_free_vote_list(&acb->votes); quorum_free_vote_list(&acb->votes);
} }
static void read_quorum_children_entry(void *opaque) static void coroutine_fn read_quorum_children_entry(void *opaque)
{ {
QuorumCo *co = opaque; QuorumCo *co = opaque;
QuorumAIOCB *acb = co->acb; QuorumAIOCB *acb = co->acb;
@ -602,7 +599,7 @@ static void read_quorum_children_entry(void *opaque)
} }
} }
static int read_quorum_children(QuorumAIOCB *acb) static int coroutine_fn read_quorum_children(QuorumAIOCB *acb)
{ {
BDRVQuorumState *s = acb->bs->opaque; BDRVQuorumState *s = acb->bs->opaque;
int i; int i;
@ -643,7 +640,7 @@ static int read_quorum_children(QuorumAIOCB *acb)
return acb->vote_ret; return acb->vote_ret;
} }
static int read_fifo_child(QuorumAIOCB *acb) static int coroutine_fn read_fifo_child(QuorumAIOCB *acb)
{ {
BDRVQuorumState *s = acb->bs->opaque; BDRVQuorumState *s = acb->bs->opaque;
int n, ret; int n, ret;
@ -664,8 +661,10 @@ static int read_fifo_child(QuorumAIOCB *acb)
return ret; return ret;
} }
static int quorum_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, static int coroutine_fn quorum_co_preadv(BlockDriverState *bs,
QEMUIOVector *qiov, BdrvRequestFlags flags) int64_t offset, int64_t bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags)
{ {
BDRVQuorumState *s = bs->opaque; BDRVQuorumState *s = bs->opaque;
QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags); QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags);
@ -684,7 +683,7 @@ static int quorum_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
return ret; return ret;
} }
static void write_quorum_entry(void *opaque) static void coroutine_fn write_quorum_entry(void *opaque)
{ {
QuorumCo *co = opaque; QuorumCo *co = opaque;
QuorumAIOCB *acb = co->acb; QuorumAIOCB *acb = co->acb;
@ -715,9 +714,9 @@ static void write_quorum_entry(void *opaque)
} }
} }
static int quorum_co_pwritev(BlockDriverState *bs, int64_t offset, static int coroutine_fn quorum_co_pwritev(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov, int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags) BdrvRequestFlags flags)
{ {
BDRVQuorumState *s = bs->opaque; BDRVQuorumState *s = bs->opaque;
QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags); QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags);
@ -746,8 +745,9 @@ static int quorum_co_pwritev(BlockDriverState *bs, int64_t offset,
return ret; return ret;
} }
static int quorum_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, static int coroutine_fn quorum_co_pwrite_zeroes(BlockDriverState *bs,
int64_t bytes, BdrvRequestFlags flags) int64_t offset, int64_t bytes,
BdrvRequestFlags flags)
{ {
return quorum_co_pwritev(bs, offset, bytes, NULL, return quorum_co_pwritev(bs, offset, bytes, NULL,

View File

@ -411,7 +411,8 @@ static void raw_lock_medium(BlockDriverState *bs, bool locked)
bdrv_lock_medium(bs->file->bs, locked); bdrv_lock_medium(bs->file->bs, locked);
} }
static int raw_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) static int coroutine_fn raw_co_ioctl(BlockDriverState *bs,
unsigned long int req, void *buf)
{ {
BDRVRawState *s = bs->opaque; BDRVRawState *s = bs->opaque;
if (s->offset || s->has_size) { if (s->offset || s->has_size) {
@ -463,7 +464,7 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags,
return -EINVAL; return -EINVAL;
} }
bs->sg = bs->file->bs->sg; bs->sg = bdrv_is_sg(bs->file->bs);
bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED | bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED |
(BDRV_REQ_FUA & bs->file->bs->supported_write_flags); (BDRV_REQ_FUA & bs->file->bs->supported_write_flags);
bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED | bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
@ -489,7 +490,7 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags,
return ret; return ret;
} }
if (bs->sg && (s->offset || s->has_size)) { if (bdrv_is_sg(bs) && (s->offset || s->has_size)) {
error_setg(errp, "Cannot use offset/size with SCSI generic devices"); error_setg(errp, "Cannot use offset/size with SCSI generic devices");
return -EINVAL; return -EINVAL;
} }

View File

@ -142,6 +142,7 @@ static void replication_close(BlockDriverState *bs)
{ {
BDRVReplicationState *s = bs->opaque; BDRVReplicationState *s = bs->opaque;
Job *commit_job; Job *commit_job;
GLOBAL_STATE_CODE();
if (s->stage == BLOCK_REPLICATION_RUNNING) { if (s->stage == BLOCK_REPLICATION_RUNNING) {
replication_stop(s->rs, false, NULL); replication_stop(s->rs, false, NULL);
@ -726,7 +727,9 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
* disk, secondary disk in backup_job_completed(). * disk, secondary disk in backup_job_completed().
*/ */
if (s->backup_job) { if (s->backup_job) {
aio_context_release(aio_context);
job_cancel_sync(&s->backup_job->job, true); job_cancel_sync(&s->backup_job->job, true);
aio_context_acquire(aio_context);
} }
if (!failover) { if (!failover) {

View File

@ -162,7 +162,7 @@ static int coroutine_fn throttle_co_pwritev_compressed(BlockDriverState *bs,
BDRV_REQ_WRITE_COMPRESSED); BDRV_REQ_WRITE_COMPRESSED);
} }
static int throttle_co_flush(BlockDriverState *bs) static int coroutine_fn throttle_co_flush(BlockDriverState *bs)
{ {
return bdrv_co_flush(bs->file->bs); return bdrv_co_flush(bs->file->bs);
} }

View File

@ -1787,10 +1787,11 @@ static int coroutine_fn vmdk_co_block_status(BlockDriverState *bs,
return ret; return ret;
} }
static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset, static int coroutine_fn
int64_t offset_in_cluster, QEMUIOVector *qiov, vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
uint64_t qiov_offset, uint64_t n_bytes, int64_t offset_in_cluster, QEMUIOVector *qiov,
uint64_t offset) uint64_t qiov_offset, uint64_t n_bytes,
uint64_t offset)
{ {
int ret; int ret;
VmdkGrainMarker *data = NULL; VmdkGrainMarker *data = NULL;
@ -1868,9 +1869,10 @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
return ret; return ret;
} }
static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset, static int coroutine_fn
int64_t offset_in_cluster, QEMUIOVector *qiov, vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
int bytes) int64_t offset_in_cluster, QEMUIOVector *qiov,
int bytes)
{ {
int ret; int ret;
int cluster_bytes, buf_bytes; int cluster_bytes, buf_bytes;
@ -2015,9 +2017,9 @@ fail:
* *
* Returns: error code with 0 for success. * Returns: error code with 0 for success.
*/ */
static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset, static int coroutine_fn vmdk_pwritev(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov, uint64_t bytes, QEMUIOVector *qiov,
bool zeroed, bool zero_dry_run) bool zeroed, bool zero_dry_run)
{ {
BDRVVmdkState *s = bs->opaque; BDRVVmdkState *s = bs->opaque;
VmdkExtent *extent = NULL; VmdkExtent *extent = NULL;

View File

@ -150,14 +150,12 @@ void blockdev_mark_auto_del(BlockBackend *blk)
return; return;
} }
for (job = block_job_next(NULL); job; job = block_job_next(job)) { JOB_LOCK_GUARD();
for (job = block_job_next_locked(NULL); job;
job = block_job_next_locked(job)) {
if (block_job_has_bdrv(job, blk_bs(blk))) { if (block_job_has_bdrv(job, blk_bs(blk))) {
AioContext *aio_context = job->job.aio_context; job_cancel_locked(&job->job, false);
aio_context_acquire(aio_context);
job_cancel(&job->job, false);
aio_context_release(aio_context);
} }
} }
@ -455,6 +453,17 @@ static void extract_common_blockdev_options(QemuOpts *opts, int *bdrv_flags,
} }
} }
static OnOffAuto account_get_opt(QemuOpts *opts, const char *name)
{
if (!qemu_opt_find(opts, name)) {
return ON_OFF_AUTO_AUTO;
}
if (qemu_opt_get_bool(opts, name, true)) {
return ON_OFF_AUTO_ON;
}
return ON_OFF_AUTO_OFF;
}
/* Takes the ownership of bs_opts */ /* Takes the ownership of bs_opts */
static BlockBackend *blockdev_init(const char *file, QDict *bs_opts, static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
Error **errp) Error **errp)
@ -462,7 +471,7 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
const char *buf; const char *buf;
int bdrv_flags = 0; int bdrv_flags = 0;
int on_read_error, on_write_error; int on_read_error, on_write_error;
bool account_invalid, account_failed; OnOffAuto account_invalid, account_failed;
bool writethrough, read_only; bool writethrough, read_only;
BlockBackend *blk; BlockBackend *blk;
BlockDriverState *bs; BlockDriverState *bs;
@ -496,8 +505,8 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
/* extract parameters */ /* extract parameters */
snapshot = qemu_opt_get_bool(opts, "snapshot", 0); snapshot = qemu_opt_get_bool(opts, "snapshot", 0);
account_invalid = qemu_opt_get_bool(opts, "stats-account-invalid", true); account_invalid = account_get_opt(opts, "stats-account-invalid");
account_failed = qemu_opt_get_bool(opts, "stats-account-failed", true); account_failed = account_get_opt(opts, "stats-account-failed");
writethrough = !qemu_opt_get_bool(opts, BDRV_OPT_CACHE_WB, true); writethrough = !qemu_opt_get_bool(opts, BDRV_OPT_CACHE_WB, true);
@ -1833,14 +1842,7 @@ static void drive_backup_abort(BlkActionState *common)
DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common); DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
if (state->job) { if (state->job) {
AioContext *aio_context;
aio_context = bdrv_get_aio_context(state->bs);
aio_context_acquire(aio_context);
job_cancel_sync(&state->job->job, true); job_cancel_sync(&state->job->job, true);
aio_context_release(aio_context);
} }
} }
@ -1934,14 +1936,7 @@ static void blockdev_backup_abort(BlkActionState *common)
BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common); BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
if (state->job) { if (state->job) {
AioContext *aio_context;
aio_context = bdrv_get_aio_context(state->bs);
aio_context_acquire(aio_context);
job_cancel_sync(&state->job->job, true); job_cancel_sync(&state->job->job, true);
aio_context_release(aio_context);
} }
} }
@ -3302,17 +3297,16 @@ out:
aio_context_release(aio_context); aio_context_release(aio_context);
} }
/* Get a block job using its ID and acquire its AioContext */ /*
static BlockJob *find_block_job(const char *id, AioContext **aio_context, * Get a block job using its ID. Called with job_mutex held.
Error **errp) */
static BlockJob *find_block_job_locked(const char *id, Error **errp)
{ {
BlockJob *job; BlockJob *job;
assert(id != NULL); assert(id != NULL);
*aio_context = NULL; job = block_job_get_locked(id);
job = block_job_get(id);
if (!job) { if (!job) {
error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE, error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE,
@ -3320,30 +3314,30 @@ static BlockJob *find_block_job(const char *id, AioContext **aio_context,
return NULL; return NULL;
} }
*aio_context = block_job_get_aio_context(job);
aio_context_acquire(*aio_context);
return job; return job;
} }
void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp) void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp)
{ {
AioContext *aio_context; BlockJob *job;
BlockJob *job = find_block_job(device, &aio_context, errp);
JOB_LOCK_GUARD();
job = find_block_job_locked(device, errp);
if (!job) { if (!job) {
return; return;
} }
block_job_set_speed(job, speed, errp); block_job_set_speed_locked(job, speed, errp);
aio_context_release(aio_context);
} }
void qmp_block_job_cancel(const char *device, void qmp_block_job_cancel(const char *device,
bool has_force, bool force, Error **errp) bool has_force, bool force, Error **errp)
{ {
AioContext *aio_context; BlockJob *job;
BlockJob *job = find_block_job(device, &aio_context, errp);
JOB_LOCK_GUARD();
job = find_block_job_locked(device, errp);
if (!job) { if (!job) {
return; return;
@ -3353,97 +3347,94 @@ void qmp_block_job_cancel(const char *device,
force = false; force = false;
} }
if (job_user_paused(&job->job) && !force) { if (job_user_paused_locked(&job->job) && !force) {
error_setg(errp, "The block job for device '%s' is currently paused", error_setg(errp, "The block job for device '%s' is currently paused",
device); device);
goto out; return;
} }
trace_qmp_block_job_cancel(job); trace_qmp_block_job_cancel(job);
job_user_cancel(&job->job, force, errp); job_user_cancel_locked(&job->job, force, errp);
out:
aio_context_release(aio_context);
} }
void qmp_block_job_pause(const char *device, Error **errp) void qmp_block_job_pause(const char *device, Error **errp)
{ {
AioContext *aio_context; BlockJob *job;
BlockJob *job = find_block_job(device, &aio_context, errp);
JOB_LOCK_GUARD();
job = find_block_job_locked(device, errp);
if (!job) { if (!job) {
return; return;
} }
trace_qmp_block_job_pause(job); trace_qmp_block_job_pause(job);
job_user_pause(&job->job, errp); job_user_pause_locked(&job->job, errp);
aio_context_release(aio_context);
} }
void qmp_block_job_resume(const char *device, Error **errp) void qmp_block_job_resume(const char *device, Error **errp)
{ {
AioContext *aio_context; BlockJob *job;
BlockJob *job = find_block_job(device, &aio_context, errp);
JOB_LOCK_GUARD();
job = find_block_job_locked(device, errp);
if (!job) { if (!job) {
return; return;
} }
trace_qmp_block_job_resume(job); trace_qmp_block_job_resume(job);
job_user_resume(&job->job, errp); job_user_resume_locked(&job->job, errp);
aio_context_release(aio_context);
} }
void qmp_block_job_complete(const char *device, Error **errp) void qmp_block_job_complete(const char *device, Error **errp)
{ {
AioContext *aio_context; BlockJob *job;
BlockJob *job = find_block_job(device, &aio_context, errp);
JOB_LOCK_GUARD();
job = find_block_job_locked(device, errp);
if (!job) { if (!job) {
return; return;
} }
trace_qmp_block_job_complete(job); trace_qmp_block_job_complete(job);
job_complete(&job->job, errp); job_complete_locked(&job->job, errp);
aio_context_release(aio_context);
} }
void qmp_block_job_finalize(const char *id, Error **errp) void qmp_block_job_finalize(const char *id, Error **errp)
{ {
AioContext *aio_context; BlockJob *job;
BlockJob *job = find_block_job(id, &aio_context, errp);
JOB_LOCK_GUARD();
job = find_block_job_locked(id, errp);
if (!job) { if (!job) {
return; return;
} }
trace_qmp_block_job_finalize(job); trace_qmp_block_job_finalize(job);
job_ref(&job->job); job_ref_locked(&job->job);
job_finalize(&job->job, errp); job_finalize_locked(&job->job, errp);
/* job_unref_locked(&job->job);
* Job's context might have changed via job_finalize (and job_txn_apply
* automatically acquires the new one), so make sure we release the correct
* one.
*/
aio_context = block_job_get_aio_context(job);
job_unref(&job->job);
aio_context_release(aio_context);
} }
void qmp_block_job_dismiss(const char *id, Error **errp) void qmp_block_job_dismiss(const char *id, Error **errp)
{ {
AioContext *aio_context; BlockJob *bjob;
BlockJob *bjob = find_block_job(id, &aio_context, errp);
Job *job; Job *job;
JOB_LOCK_GUARD();
bjob = find_block_job_locked(id, errp);
if (!bjob) { if (!bjob) {
return; return;
} }
trace_qmp_block_job_dismiss(bjob); trace_qmp_block_job_dismiss(bjob);
job = &bjob->job; job = &bjob->job;
job_dismiss(&job, errp); job_dismiss_locked(&job, errp);
aio_context_release(aio_context);
} }
void qmp_change_backing_file(const char *device, void qmp_change_backing_file(const char *device,
@ -3720,17 +3711,16 @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp)
BlockJobInfoList *head = NULL, **tail = &head; BlockJobInfoList *head = NULL, **tail = &head;
BlockJob *job; BlockJob *job;
for (job = block_job_next(NULL); job; job = block_job_next(job)) { JOB_LOCK_GUARD();
for (job = block_job_next_locked(NULL); job;
job = block_job_next_locked(job)) {
BlockJobInfo *value; BlockJobInfo *value;
AioContext *aio_context;
if (block_job_is_internal(job)) { if (block_job_is_internal(job)) {
continue; continue;
} }
aio_context = block_job_get_aio_context(job); value = block_job_query_locked(job, errp);
aio_context_acquire(aio_context);
value = block_job_query(job, errp);
aio_context_release(aio_context);
if (!value) { if (!value) {
qapi_free_BlockJobInfoList(head); qapi_free_BlockJobInfoList(head);
return NULL; return NULL;

View File

@ -36,21 +36,6 @@
#include "qemu/main-loop.h" #include "qemu/main-loop.h"
#include "qemu/timer.h" #include "qemu/timer.h"
/*
* The block job API is composed of two categories of functions.
*
* The first includes functions used by the monitor. The monitor is
* peculiar in that it accesses the block job list with block_job_get, and
* therefore needs consistency across block_job_get and the actual operation
* (e.g. block_job_set_speed). The consistency is achieved with
* aio_context_acquire/release. These functions are declared in blockjob.h.
*
* The second includes functions used by the block job drivers and sometimes
* by the core block layer. These do not care about locking, because the
* whole coroutine runs under the AioContext lock, and are declared in
* blockjob_int.h.
*/
static bool is_block_job(Job *job) static bool is_block_job(Job *job)
{ {
return job_type(job) == JOB_TYPE_BACKUP || return job_type(job) == JOB_TYPE_BACKUP ||
@ -59,21 +44,21 @@ static bool is_block_job(Job *job)
job_type(job) == JOB_TYPE_STREAM; job_type(job) == JOB_TYPE_STREAM;
} }
BlockJob *block_job_next(BlockJob *bjob) BlockJob *block_job_next_locked(BlockJob *bjob)
{ {
Job *job = bjob ? &bjob->job : NULL; Job *job = bjob ? &bjob->job : NULL;
GLOBAL_STATE_CODE(); GLOBAL_STATE_CODE();
do { do {
job = job_next(job); job = job_next_locked(job);
} while (job && !is_block_job(job)); } while (job && !is_block_job(job));
return job ? container_of(job, BlockJob, job) : NULL; return job ? container_of(job, BlockJob, job) : NULL;
} }
BlockJob *block_job_get(const char *id) BlockJob *block_job_get_locked(const char *id)
{ {
Job *job = job_get(id); Job *job = job_get_locked(id);
GLOBAL_STATE_CODE(); GLOBAL_STATE_CODE();
if (job && is_block_job(job)) { if (job && is_block_job(job)) {
@ -83,6 +68,12 @@ BlockJob *block_job_get(const char *id)
} }
} }
BlockJob *block_job_get(const char *id)
{
JOB_LOCK_GUARD();
return block_job_get_locked(id);
}
void block_job_free(Job *job) void block_job_free(Job *job)
{ {
BlockJob *bjob = container_of(job, BlockJob, job); BlockJob *bjob = container_of(job, BlockJob, job);
@ -114,8 +105,10 @@ static bool child_job_drained_poll(BdrvChild *c)
/* An inactive or completed job doesn't have any pending requests. Jobs /* An inactive or completed job doesn't have any pending requests. Jobs
* with !job->busy are either already paused or have a pause point after * with !job->busy are either already paused or have a pause point after
* being reentered, so no job driver code will run before they pause. */ * being reentered, so no job driver code will run before they pause. */
if (!job->busy || job_is_completed(job)) { WITH_JOB_LOCK_GUARD() {
return false; if (!job->busy || job_is_completed_locked(job)) {
return false;
}
} }
/* Otherwise, assume that it isn't fully stopped yet, but allow the job to /* Otherwise, assume that it isn't fully stopped yet, but allow the job to
@ -163,12 +156,13 @@ static void child_job_set_aio_ctx(BdrvChild *c, AioContext *ctx,
bdrv_set_aio_context_ignore(sibling->bs, ctx, ignore); bdrv_set_aio_context_ignore(sibling->bs, ctx, ignore);
} }
job->job.aio_context = ctx; job_set_aio_context(&job->job, ctx);
} }
static AioContext *child_job_get_parent_aio_context(BdrvChild *c) static AioContext *child_job_get_parent_aio_context(BdrvChild *c)
{ {
BlockJob *job = c->opaque; BlockJob *job = c->opaque;
GLOBAL_STATE_CODE();
return job->job.aio_context; return job->job.aio_context;
} }
@ -250,7 +244,8 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
return 0; return 0;
} }
static void block_job_on_idle(Notifier *n, void *opaque) /* Called with job_mutex lock held. */
static void block_job_on_idle_locked(Notifier *n, void *opaque)
{ {
aio_wait_kick(); aio_wait_kick();
} }
@ -271,14 +266,14 @@ static bool job_timer_pending(Job *job)
return timer_pending(&job->sleep_timer); return timer_pending(&job->sleep_timer);
} }
bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp)
{ {
const BlockJobDriver *drv = block_job_driver(job); const BlockJobDriver *drv = block_job_driver(job);
int64_t old_speed = job->speed; int64_t old_speed = job->speed;
GLOBAL_STATE_CODE(); GLOBAL_STATE_CODE();
if (job_apply_verb(&job->job, JOB_VERB_SET_SPEED, errp) < 0) { if (job_apply_verb_locked(&job->job, JOB_VERB_SET_SPEED, errp) < 0) {
return false; return false;
} }
if (speed < 0) { if (speed < 0) {
@ -292,7 +287,9 @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
job->speed = speed; job->speed = speed;
if (drv->set_speed) { if (drv->set_speed) {
job_unlock();
drv->set_speed(job, speed); drv->set_speed(job, speed);
job_lock();
} }
if (speed && speed <= old_speed) { if (speed && speed <= old_speed) {
@ -300,18 +297,24 @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
} }
/* kick only if a timer is pending */ /* kick only if a timer is pending */
job_enter_cond(&job->job, job_timer_pending); job_enter_cond_locked(&job->job, job_timer_pending);
return true; return true;
} }
static bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
JOB_LOCK_GUARD();
return block_job_set_speed_locked(job, speed, errp);
}
int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n) int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n)
{ {
IO_CODE(); IO_CODE();
return ratelimit_calculate_delay(&job->limit, n); return ratelimit_calculate_delay(&job->limit, n);
} }
BlockJobInfo *block_job_query(BlockJob *job, Error **errp) BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp)
{ {
BlockJobInfo *info; BlockJobInfo *info;
uint64_t progress_current, progress_total; uint64_t progress_current, progress_total;
@ -329,13 +332,13 @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
info = g_new0(BlockJobInfo, 1); info = g_new0(BlockJobInfo, 1);
info->type = g_strdup(job_type_str(&job->job)); info->type = g_strdup(job_type_str(&job->job));
info->device = g_strdup(job->job.id); info->device = g_strdup(job->job.id);
info->busy = qatomic_read(&job->job.busy); info->busy = job->job.busy;
info->paused = job->job.pause_count > 0; info->paused = job->job.pause_count > 0;
info->offset = progress_current; info->offset = progress_current;
info->len = progress_total; info->len = progress_total;
info->speed = job->speed; info->speed = job->speed;
info->io_status = job->iostatus; info->io_status = job->iostatus;
info->ready = job_is_ready(&job->job), info->ready = job_is_ready_locked(&job->job),
info->status = job->job.status; info->status = job->job.status;
info->auto_finalize = job->job.auto_finalize; info->auto_finalize = job->job.auto_finalize;
info->auto_dismiss = job->job.auto_dismiss; info->auto_dismiss = job->job.auto_dismiss;
@ -348,7 +351,8 @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
return info; return info;
} }
static void block_job_iostatus_set_err(BlockJob *job, int error) /* Called with job lock held */
static void block_job_iostatus_set_err_locked(BlockJob *job, int error)
{ {
if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE : job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
@ -356,7 +360,8 @@ static void block_job_iostatus_set_err(BlockJob *job, int error)
} }
} }
static void block_job_event_cancelled(Notifier *n, void *opaque) /* Called with job_mutex lock held. */
static void block_job_event_cancelled_locked(Notifier *n, void *opaque)
{ {
BlockJob *job = opaque; BlockJob *job = opaque;
uint64_t progress_current, progress_total; uint64_t progress_current, progress_total;
@ -375,7 +380,8 @@ static void block_job_event_cancelled(Notifier *n, void *opaque)
job->speed); job->speed);
} }
static void block_job_event_completed(Notifier *n, void *opaque) /* Called with job_mutex lock held. */
static void block_job_event_completed_locked(Notifier *n, void *opaque)
{ {
BlockJob *job = opaque; BlockJob *job = opaque;
const char *msg = NULL; const char *msg = NULL;
@ -401,7 +407,8 @@ static void block_job_event_completed(Notifier *n, void *opaque)
msg); msg);
} }
static void block_job_event_pending(Notifier *n, void *opaque) /* Called with job_mutex lock held. */
static void block_job_event_pending_locked(Notifier *n, void *opaque)
{ {
BlockJob *job = opaque; BlockJob *job = opaque;
@ -413,7 +420,8 @@ static void block_job_event_pending(Notifier *n, void *opaque)
job->job.id); job->job.id);
} }
static void block_job_event_ready(Notifier *n, void *opaque) /* Called with job_mutex lock held. */
static void block_job_event_ready_locked(Notifier *n, void *opaque)
{ {
BlockJob *job = opaque; BlockJob *job = opaque;
uint64_t progress_current, progress_total; uint64_t progress_current, progress_total;
@ -433,11 +441,6 @@ static void block_job_event_ready(Notifier *n, void *opaque)
} }
/*
* API for block job drivers and the block layer. These functions are
* declared in blockjob_int.h.
*/
void *block_job_create(const char *job_id, const BlockJobDriver *driver, void *block_job_create(const char *job_id, const BlockJobDriver *driver,
JobTxn *txn, BlockDriverState *bs, uint64_t perm, JobTxn *txn, BlockDriverState *bs, uint64_t perm,
uint64_t shared_perm, int64_t speed, int flags, uint64_t shared_perm, int64_t speed, int flags,
@ -463,19 +466,21 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
ratelimit_init(&job->limit); ratelimit_init(&job->limit);
job->finalize_cancelled_notifier.notify = block_job_event_cancelled; job->finalize_cancelled_notifier.notify = block_job_event_cancelled_locked;
job->finalize_completed_notifier.notify = block_job_event_completed; job->finalize_completed_notifier.notify = block_job_event_completed_locked;
job->pending_notifier.notify = block_job_event_pending; job->pending_notifier.notify = block_job_event_pending_locked;
job->ready_notifier.notify = block_job_event_ready; job->ready_notifier.notify = block_job_event_ready_locked;
job->idle_notifier.notify = block_job_on_idle; job->idle_notifier.notify = block_job_on_idle_locked;
notifier_list_add(&job->job.on_finalize_cancelled, WITH_JOB_LOCK_GUARD() {
&job->finalize_cancelled_notifier); notifier_list_add(&job->job.on_finalize_cancelled,
notifier_list_add(&job->job.on_finalize_completed, &job->finalize_cancelled_notifier);
&job->finalize_completed_notifier); notifier_list_add(&job->job.on_finalize_completed,
notifier_list_add(&job->job.on_pending, &job->pending_notifier); &job->finalize_completed_notifier);
notifier_list_add(&job->job.on_ready, &job->ready_notifier); notifier_list_add(&job->job.on_pending, &job->pending_notifier);
notifier_list_add(&job->job.on_idle, &job->idle_notifier); notifier_list_add(&job->job.on_ready, &job->ready_notifier);
notifier_list_add(&job->job.on_idle, &job->idle_notifier);
}
error_setg(&job->blocker, "block device is in use by block job: %s", error_setg(&job->blocker, "block device is in use by block job: %s",
job_type_str(&job->job)); job_type_str(&job->job));
@ -498,7 +503,7 @@ fail:
return NULL; return NULL;
} }
void block_job_iostatus_reset(BlockJob *job) void block_job_iostatus_reset_locked(BlockJob *job)
{ {
GLOBAL_STATE_CODE(); GLOBAL_STATE_CODE();
if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
@ -508,6 +513,12 @@ void block_job_iostatus_reset(BlockJob *job)
job->iostatus = BLOCK_DEVICE_IO_STATUS_OK; job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
} }
static void block_job_iostatus_reset(BlockJob *job)
{
JOB_LOCK_GUARD();
block_job_iostatus_reset_locked(job);
}
void block_job_user_resume(Job *job) void block_job_user_resume(Job *job)
{ {
BlockJob *bjob = container_of(job, BlockJob, job); BlockJob *bjob = container_of(job, BlockJob, job);
@ -546,12 +557,17 @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
action); action);
} }
if (action == BLOCK_ERROR_ACTION_STOP) { if (action == BLOCK_ERROR_ACTION_STOP) {
if (!job->job.user_paused) { WITH_JOB_LOCK_GUARD() {
job_pause(&job->job); if (!job->job.user_paused) {
/* make the pause user visible, which will be resumed from QMP. */ job_pause_locked(&job->job);
job->job.user_paused = true; /*
* make the pause user visible, which will be
* resumed from QMP.
*/
job->job.user_paused = true;
}
block_job_iostatus_set_err_locked(job, error);
} }
block_job_iostatus_set_err(job, error);
} }
return action; return action;
} }

View File

@ -87,6 +87,9 @@
#define BUF_SIZE 256 #define BUF_SIZE 256
#define X_MAX 84
#define Y_MAX 1
struct BaumChardev { struct BaumChardev {
Chardev parent; Chardev parent;
@ -244,11 +247,11 @@ static int baum_deferred_init(BaumChardev *baum)
brlapi_perror("baum: brlapi__getDisplaySize"); brlapi_perror("baum: brlapi__getDisplaySize");
return 0; return 0;
} }
if (baum->y > 1) { if (baum->y > Y_MAX) {
baum->y = 1; baum->y = Y_MAX;
} }
if (baum->x > 84) { if (baum->x > X_MAX) {
baum->x = 84; baum->x = X_MAX;
} }
con = qemu_console_lookup_by_index(0); con = qemu_console_lookup_by_index(0);
@ -296,7 +299,8 @@ static void baum_chr_accept_input(struct Chardev *chr)
static void baum_write_packet(BaumChardev *baum, const uint8_t *buf, int len) static void baum_write_packet(BaumChardev *baum, const uint8_t *buf, int len)
{ {
Chardev *chr = CHARDEV(baum); Chardev *chr = CHARDEV(baum);
uint8_t io_buf[1 + 2 * len], *cur = io_buf; g_autofree uint8_t *io_buf = g_malloc(1 + 2 * len);
uint8_t *cur = io_buf;
int room; int room;
*cur++ = ESC; *cur++ = ESC;
while (len--) while (len--)
@ -380,9 +384,9 @@ static int baum_eat_packet(BaumChardev *baum, const uint8_t *buf, int len)
switch (req) { switch (req) {
case BAUM_REQ_DisplayData: case BAUM_REQ_DisplayData:
{ {
uint8_t cells[baum->x * baum->y], c; uint8_t cells[X_MAX * Y_MAX], c;
uint8_t text[baum->x * baum->y]; uint8_t text[X_MAX * Y_MAX];
uint8_t zero[baum->x * baum->y]; uint8_t zero[X_MAX * Y_MAX];
int cursor = BRLAPI_CURSOR_OFF; int cursor = BRLAPI_CURSOR_OFF;
int i; int i;
@ -405,7 +409,7 @@ static int baum_eat_packet(BaumChardev *baum, const uint8_t *buf, int len)
} }
timer_del(baum->cellCount_timer); timer_del(baum->cellCount_timer);
memset(zero, 0, sizeof(zero)); memset(zero, 0, baum->x * baum->y);
brlapi_writeArguments_t wa = { brlapi_writeArguments_t wa = {
.displayNumber = BRLAPI_DISPLAY_DEFAULT, .displayNumber = BRLAPI_DISPLAY_DEFAULT,

View File

@ -557,12 +557,10 @@ static char *qemu_chr_compute_filename(SocketChardev *s)
const char *left = "", *right = ""; const char *left = "", *right = "";
switch (ss->ss_family) { switch (ss->ss_family) {
#ifndef _WIN32
case AF_UNIX: case AF_UNIX:
return g_strdup_printf("unix:%s%s", return g_strdup_printf("unix:%s%s",
((struct sockaddr_un *)(ss))->sun_path, ((struct sockaddr_un *)(ss))->sun_path,
s->is_listen ? ",server=on" : ""); s->is_listen ? ",server=on" : "");
#endif
case AF_INET6: case AF_INET6:
left = "["; left = "[";
right = "]"; right = "]";
@ -1372,10 +1370,12 @@ static void qmp_chardev_open_socket(Chardev *chr,
} }
qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_RECONNECTABLE); qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_RECONNECTABLE);
#ifndef _WIN32
/* TODO SOCKET_ADDRESS_FD where fd has AF_UNIX */ /* TODO SOCKET_ADDRESS_FD where fd has AF_UNIX */
if (addr->type == SOCKET_ADDRESS_TYPE_UNIX) { if (addr->type == SOCKET_ADDRESS_TYPE_UNIX) {
qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_FD_PASS); qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_FD_PASS);
} }
#endif
/* /*
* In the chardev-change special-case, we shouldn't register a new yank * In the chardev-change special-case, we shouldn't register a new yank

View File

@ -193,7 +193,7 @@ int qemu_chr_be_can_write(Chardev *s)
return be->chr_can_read(be->opaque); return be->chr_can_read(be->opaque);
} }
void qemu_chr_be_write_impl(Chardev *s, uint8_t *buf, int len) void qemu_chr_be_write_impl(Chardev *s, const uint8_t *buf, int len)
{ {
CharBackend *be = s->be; CharBackend *be = s->be;
@ -202,7 +202,7 @@ void qemu_chr_be_write_impl(Chardev *s, uint8_t *buf, int len)
} }
} }
void qemu_chr_be_write(Chardev *s, uint8_t *buf, int len) void qemu_chr_be_write(Chardev *s, const uint8_t *buf, int len)
{ {
if (qemu_chr_replay(s)) { if (qemu_chr_replay(s)) {
if (replay_mode == REPLAY_MODE_PLAY) { if (replay_mode == REPLAY_MODE_PLAY) {

View File

@ -24,23 +24,45 @@
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu/module.h" #include "qemu/module.h"
#include "qemu/fifo8.h"
#include "chardev/char.h" #include "chardev/char.h"
#include "chardev/char-serial.h"
#include "ui/console.h" #include "ui/console.h"
#include "ui/input.h" #include "ui/input.h"
#include "qom/object.h" #include "qom/object.h"
#define MSMOUSE_LO6(n) ((n) & 0x3f) #define MSMOUSE_LO6(n) ((n) & 0x3f)
#define MSMOUSE_HI2(n) (((n) & 0xc0) >> 6) #define MSMOUSE_HI2(n) (((n) & 0xc0) >> 6)
#define MSMOUSE_PWR(cm) (cm & (CHR_TIOCM_RTS | CHR_TIOCM_DTR))
/* Serial PnP for 6 bit devices/mice sends all ASCII chars - 0x20 */
#define M(c) (c - 0x20)
/* Serial fifo size. */
#define MSMOUSE_BUF_SZ 64
/* Mouse ID: Send "M3" cause we behave like a 3 button logitech mouse. */
const uint8_t mouse_id[] = {'M', '3'};
/*
* PnP start "(", PnP version (1.0), vendor ID, product ID, '\\',
* serial ID (omitted), '\\', MS class name, '\\', driver ID (omitted), '\\',
* product description, checksum, ")"
* Missing parts are inserted later.
*/
const uint8_t pnp_data[] = {M('('), 1, '$', M('Q'), M('M'), M('U'),
M('0'), M('0'), M('0'), M('1'),
M('\\'), M('\\'),
M('M'), M('O'), M('U'), M('S'), M('E'),
M('\\'), M('\\')};
struct MouseChardev { struct MouseChardev {
Chardev parent; Chardev parent;
QemuInputHandlerState *hs; QemuInputHandlerState *hs;
int tiocm;
int axis[INPUT_AXIS__MAX]; int axis[INPUT_AXIS__MAX];
bool btns[INPUT_BUTTON__MAX]; bool btns[INPUT_BUTTON__MAX];
bool btnc[INPUT_BUTTON__MAX]; bool btnc[INPUT_BUTTON__MAX];
uint8_t outbuf[32]; Fifo8 outbuf;
int outlen;
}; };
typedef struct MouseChardev MouseChardev; typedef struct MouseChardev MouseChardev;
@ -51,20 +73,18 @@ DECLARE_INSTANCE_CHECKER(MouseChardev, MOUSE_CHARDEV,
static void msmouse_chr_accept_input(Chardev *chr) static void msmouse_chr_accept_input(Chardev *chr)
{ {
MouseChardev *mouse = MOUSE_CHARDEV(chr); MouseChardev *mouse = MOUSE_CHARDEV(chr);
int len; uint32_t len, avail;
len = qemu_chr_be_can_write(chr); len = qemu_chr_be_can_write(chr);
if (len > mouse->outlen) { avail = fifo8_num_used(&mouse->outbuf);
len = mouse->outlen; while (len > 0 && avail > 0) {
} const uint8_t *buf;
if (!len) { uint32_t size;
return;
}
qemu_chr_be_write(chr, mouse->outbuf, len); buf = fifo8_pop_buf(&mouse->outbuf, MIN(len, avail), &size);
mouse->outlen -= len; qemu_chr_be_write(chr, buf, size);
if (mouse->outlen) { len = qemu_chr_be_can_write(chr);
memmove(mouse->outbuf, mouse->outbuf + len, mouse->outlen); avail -= size;
} }
} }
@ -91,12 +111,11 @@ static void msmouse_queue_event(MouseChardev *mouse)
mouse->btnc[INPUT_BUTTON_MIDDLE]) { mouse->btnc[INPUT_BUTTON_MIDDLE]) {
bytes[3] |= (mouse->btns[INPUT_BUTTON_MIDDLE] ? 0x20 : 0x00); bytes[3] |= (mouse->btns[INPUT_BUTTON_MIDDLE] ? 0x20 : 0x00);
mouse->btnc[INPUT_BUTTON_MIDDLE] = false; mouse->btnc[INPUT_BUTTON_MIDDLE] = false;
count = 4; count++;
} }
if (mouse->outlen <= sizeof(mouse->outbuf) - count) { if (fifo8_num_free(&mouse->outbuf) >= count) {
memcpy(mouse->outbuf + mouse->outlen, bytes, count); fifo8_push_all(&mouse->outbuf, bytes, count);
mouse->outlen += count;
} else { } else {
/* queue full -> drop event */ /* queue full -> drop event */
} }
@ -109,6 +128,11 @@ static void msmouse_input_event(DeviceState *dev, QemuConsole *src,
InputMoveEvent *move; InputMoveEvent *move;
InputBtnEvent *btn; InputBtnEvent *btn;
/* Ignore events if serial mouse powered down. */
if (!MSMOUSE_PWR(mouse->tiocm)) {
return;
}
switch (evt->type) { switch (evt->type) {
case INPUT_EVENT_KIND_REL: case INPUT_EVENT_KIND_REL:
move = evt->u.rel.data; move = evt->u.rel.data;
@ -132,6 +156,11 @@ static void msmouse_input_sync(DeviceState *dev)
MouseChardev *mouse = MOUSE_CHARDEV(dev); MouseChardev *mouse = MOUSE_CHARDEV(dev);
Chardev *chr = CHARDEV(dev); Chardev *chr = CHARDEV(dev);
/* Ignore events if serial mouse powered down. */
if (!MSMOUSE_PWR(mouse->tiocm)) {
return;
}
msmouse_queue_event(mouse); msmouse_queue_event(mouse);
msmouse_chr_accept_input(chr); msmouse_chr_accept_input(chr);
} }
@ -142,13 +171,6 @@ static int msmouse_chr_write(struct Chardev *s, const uint8_t *buf, int len)
return len; return len;
} }
static void char_msmouse_finalize(Object *obj)
{
MouseChardev *mouse = MOUSE_CHARDEV(obj);
qemu_input_handler_unregister(mouse->hs);
}
static QemuInputHandler msmouse_handler = { static QemuInputHandler msmouse_handler = {
.name = "QEMU Microsoft Mouse", .name = "QEMU Microsoft Mouse",
.mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL, .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL,
@ -156,6 +178,81 @@ static QemuInputHandler msmouse_handler = {
.sync = msmouse_input_sync, .sync = msmouse_input_sync,
}; };
static int msmouse_ioctl(Chardev *chr, int cmd, void *arg)
{
MouseChardev *mouse = MOUSE_CHARDEV(chr);
int c, i, j;
uint8_t bytes[MSMOUSE_BUF_SZ / 2];
int *targ = (int *)arg;
const uint8_t hexchr[16] = {M('0'), M('1'), M('2'), M('3'), M('4'), M('5'),
M('6'), M('7'), M('8'), M('9'), M('A'), M('B'),
M('C'), M('D'), M('E'), M('F')};
switch (cmd) {
case CHR_IOCTL_SERIAL_SET_TIOCM:
c = mouse->tiocm;
mouse->tiocm = *(int *)arg;
if (MSMOUSE_PWR(mouse->tiocm)) {
if (!MSMOUSE_PWR(c)) {
/*
* Power on after reset: Send ID and PnP data
* No need to check fifo space as it is empty at this point.
*/
fifo8_push_all(&mouse->outbuf, mouse_id, sizeof(mouse_id));
/* Add PnP data: */
fifo8_push_all(&mouse->outbuf, pnp_data, sizeof(pnp_data));
/*
* Add device description from qemu handler name.
* Make sure this all fits into the queue beforehand!
*/
c = M(')');
for (i = 0; msmouse_handler.name[i]; i++) {
bytes[i] = M(msmouse_handler.name[i]);
c += bytes[i];
}
/* Calc more of checksum */
for (j = 0; j < sizeof(pnp_data); j++) {
c += pnp_data[j];
}
c &= 0xff;
bytes[i++] = hexchr[c >> 4];
bytes[i++] = hexchr[c & 0x0f];
bytes[i++] = M(')');
fifo8_push_all(&mouse->outbuf, bytes, i);
/* Start sending data to serial. */
msmouse_chr_accept_input(chr);
}
break;
}
/*
* Reset mouse buffers on power down.
* Mouse won't send anything without power.
*/
fifo8_reset(&mouse->outbuf);
memset(mouse->axis, 0, sizeof(mouse->axis));
memset(mouse->btns, false, sizeof(mouse->btns));
memset(mouse->btnc, false, sizeof(mouse->btns));
break;
case CHR_IOCTL_SERIAL_GET_TIOCM:
/* Remember line control status. */
*targ = mouse->tiocm;
break;
default:
return -ENOTSUP;
}
return 0;
}
static void char_msmouse_finalize(Object *obj)
{
MouseChardev *mouse = MOUSE_CHARDEV(obj);
if (mouse->hs) {
qemu_input_handler_unregister(mouse->hs);
}
fifo8_destroy(&mouse->outbuf);
}
static void msmouse_chr_open(Chardev *chr, static void msmouse_chr_open(Chardev *chr,
ChardevBackend *backend, ChardevBackend *backend,
bool *be_opened, bool *be_opened,
@ -166,6 +263,8 @@ static void msmouse_chr_open(Chardev *chr,
*be_opened = false; *be_opened = false;
mouse->hs = qemu_input_handler_register((DeviceState *)mouse, mouse->hs = qemu_input_handler_register((DeviceState *)mouse,
&msmouse_handler); &msmouse_handler);
mouse->tiocm = 0;
fifo8_create(&mouse->outbuf, MSMOUSE_BUF_SZ);
} }
static void char_msmouse_class_init(ObjectClass *oc, void *data) static void char_msmouse_class_init(ObjectClass *oc, void *data)
@ -175,6 +274,7 @@ static void char_msmouse_class_init(ObjectClass *oc, void *data)
cc->open = msmouse_chr_open; cc->open = msmouse_chr_open;
cc->chr_write = msmouse_chr_write; cc->chr_write = msmouse_chr_write;
cc->chr_accept_input = msmouse_chr_accept_input; cc->chr_accept_input = msmouse_chr_accept_input;
cc->chr_ioctl = msmouse_ioctl;
} }
static const TypeInfo char_msmouse_type_info = { static const TypeInfo char_msmouse_type_info = {

View File

@ -319,7 +319,9 @@ static void wctablet_chr_finalize(Object *obj)
{ {
TabletChardev *tablet = WCTABLET_CHARDEV(obj); TabletChardev *tablet = WCTABLET_CHARDEV(obj);
qemu_input_handler_unregister(tablet->hs); if (tablet->hs) {
qemu_input_handler_unregister(tablet->hs);
}
} }
static void wctablet_chr_open(Chardev *chr, static void wctablet_chr_open(Chardev *chr,

View File

@ -3,3 +3,4 @@
# Boards: # Boards:
# #
CONFIG_OR1K_SIM=y CONFIG_OR1K_SIM=y
CONFIG_OR1K_VIRT=y

View File

@ -1,5 +1,5 @@
TARGET_ARCH=loongarch64 TARGET_ARCH=loongarch64
TARGET_BASE_ARCH=loongarch TARGET_BASE_ARCH=loongarch
TARGET_SUPPORTS_MTTCG=y TARGET_SUPPORTS_MTTCG=y
TARGET_XML_FILES= gdb-xml/loongarch-base64.xml gdb-xml/loongarch-fpu64.xml TARGET_XML_FILES= gdb-xml/loongarch-base64.xml gdb-xml/loongarch-fpu.xml
TARGET_NEED_FDT=y TARGET_NEED_FDT=y

View File

@ -1,3 +1,4 @@
TARGET_ARCH=openrisc TARGET_ARCH=openrisc
TARGET_SUPPORTS_MTTCG=y
TARGET_BIG_ENDIAN=y TARGET_BIG_ENDIAN=y
TARGET_NEED_FDT=y TARGET_NEED_FDT=y

410
configure vendored
View File

@ -57,7 +57,7 @@ GNUmakefile: ;
EOF EOF
cd build cd build
exec $source_path/configure "$@" exec "$source_path/configure" "$@"
fi fi
# Temporary directory used for files created while # Temporary directory used for files created while
@ -67,8 +67,7 @@ fi
# it when configure exits.) # it when configure exits.)
TMPDIR1="config-temp" TMPDIR1="config-temp"
rm -rf "${TMPDIR1}" rm -rf "${TMPDIR1}"
mkdir -p "${TMPDIR1}" if ! mkdir -p "${TMPDIR1}"; then
if [ $? -ne 0 ]; then
echo "ERROR: failed to create temporary directory" echo "ERROR: failed to create temporary directory"
exit 1 exit 1
fi fi
@ -76,7 +75,6 @@ fi
TMPB="qemu-conf" TMPB="qemu-conf"
TMPC="${TMPDIR1}/${TMPB}.c" TMPC="${TMPDIR1}/${TMPB}.c"
TMPO="${TMPDIR1}/${TMPB}.o" TMPO="${TMPDIR1}/${TMPB}.o"
TMPCXX="${TMPDIR1}/${TMPB}.cxx"
TMPM="${TMPDIR1}/${TMPB}.m" TMPM="${TMPDIR1}/${TMPB}.m"
TMPE="${TMPDIR1}/${TMPB}.exe" TMPE="${TMPDIR1}/${TMPB}.exe"
@ -111,7 +109,7 @@ error_exit() {
do_compiler() { do_compiler() {
# Run the compiler, capturing its output to the log. First argument # Run the compiler, capturing its output to the log. First argument
# is compiler binary to execute. # is compiler binary to execute.
local compiler="$1" compiler="$1"
shift shift
if test -n "$BASH_VERSION"; then eval ' if test -n "$BASH_VERSION"; then eval '
echo >>config.log " echo >>config.log "
@ -159,10 +157,6 @@ do_cc() {
do_compiler_werror "$cc" $CPU_CFLAGS "$@" do_compiler_werror "$cc" $CPU_CFLAGS "$@"
} }
do_cxx() {
do_compiler_werror "$cxx" $CPU_CFLAGS "$@"
}
do_objc() { do_objc() {
do_compiler_werror "$objcc" $CPU_CFLAGS "$@" do_compiler_werror "$objcc" $CPU_CFLAGS "$@"
} }
@ -172,24 +166,6 @@ add_to() {
eval $1=\${$1:+\"\$$1 \"}\$2 eval $1=\${$1:+\"\$$1 \"}\$2
} }
update_cxxflags() {
# Set QEMU_CXXFLAGS from QEMU_CFLAGS by filtering out those
# options which some versions of GCC's C++ compiler complain about
# because they only make sense for C programs.
QEMU_CXXFLAGS="-D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS"
CONFIGURE_CXXFLAGS=$(echo "$CONFIGURE_CFLAGS" | sed s/-std=gnu11/-std=gnu++11/)
for arg in $QEMU_CFLAGS; do
case $arg in
-Wstrict-prototypes|-Wmissing-prototypes|-Wnested-externs|\
-Wold-style-declaration|-Wold-style-definition|-Wredundant-decls)
;;
*)
QEMU_CXXFLAGS=${QEMU_CXXFLAGS:+$QEMU_CXXFLAGS }$arg
;;
esac
done
}
compile_object() { compile_object() {
local_cflags="$1" local_cflags="$1"
do_cc $CFLAGS $EXTRA_CFLAGS $CONFIGURE_CFLAGS $QEMU_CFLAGS $local_cflags -c -o $TMPO $TMPC do_cc $CFLAGS $EXTRA_CFLAGS $CONFIGURE_CFLAGS $QEMU_CFLAGS $local_cflags -c -o $TMPO $TMPC
@ -311,7 +287,6 @@ pie=""
coroutine="" coroutine=""
plugins="$default_feature" plugins="$default_feature"
meson="" meson=""
meson_args=""
ninja="" ninja=""
bindir="bin" bindir="bin"
skip_meson=no skip_meson=no
@ -321,11 +296,6 @@ vfio_user_server="disabled"
# are included in the automatically generated help message) # are included in the automatically generated help message)
# 1. Track which submodules are needed # 1. Track which submodules are needed
if test "$default_feature" = no ; then
slirp="disabled"
else
slirp="auto"
fi
fdt="auto" fdt="auto"
# 2. Automatically enable/disable other options # 2. Automatically enable/disable other options
@ -695,20 +665,21 @@ meson_option_build_array() {
meson_option_build_array() { meson_option_build_array() {
printf '[' printf '['
(if test "$targetos" == windows; then (if test "$targetos" = windows; then
IFS=\; IFS=\;
else else
IFS=: IFS=:
fi fi
for e in $1; do for e in $1; do
e=${e/'\'/'\\'} printf '"""'
e=${e/\"/'\"'} # backslash escape any '\' and '"' characters
printf '"""%s""",' "$e" printf "%s" "$e" | sed -e 's/\([\"]\)/\\\1/g'
printf '""",'
done) done)
printf ']\n' printf ']\n'
} }
. $source_path/scripts/meson-buildoptions.sh . "$source_path/scripts/meson-buildoptions.sh"
meson_options= meson_options=
meson_option_add() { meson_option_add() {
@ -728,7 +699,7 @@ for opt do
case "$opt" in case "$opt" in
--help|-h) show_help=yes --help|-h) show_help=yes
;; ;;
--version|-V) exec cat $source_path/VERSION --version|-V) exec cat "$source_path/VERSION"
;; ;;
--prefix=*) prefix="$optarg" --prefix=*) prefix="$optarg"
;; ;;
@ -845,14 +816,6 @@ for opt do
;; ;;
--disable-tsan) tsan="no" --disable-tsan) tsan="no"
;; ;;
--disable-slirp) slirp="disabled"
;;
--enable-slirp) slirp="enabled"
;;
--enable-slirp=git) slirp="internal"
;;
--enable-slirp=*) slirp="$optarg"
;;
--disable-tcg) tcg="disabled" --disable-tcg) tcg="disabled"
plugins="no" plugins="no"
;; ;;
@ -1017,7 +980,7 @@ default_target_list=""
mak_wilds="" mak_wilds=""
if [ "$linux_user" != no ]; then if [ "$linux_user" != no ]; then
if [ "$targetos" = linux ] && [ -d $source_path/linux-user/include/host/$cpu ]; then if [ "$targetos" = linux ] && [ -d "$source_path/linux-user/include/host/$cpu" ]; then
linux_user=yes linux_user=yes
elif [ "$linux_user" = yes ]; then elif [ "$linux_user" = yes ]; then
error_exit "linux-user not supported on this architecture" error_exit "linux-user not supported on this architecture"
@ -1027,7 +990,7 @@ if [ "$bsd_user" != no ]; then
if [ "$bsd_user" = "" ]; then if [ "$bsd_user" = "" ]; then
test $targetos = freebsd && bsd_user=yes test $targetos = freebsd && bsd_user=yes
fi fi
if [ "$bsd_user" = yes ] && ! [ -d $source_path/bsd-user/$targetos ]; then if [ "$bsd_user" = yes ] && ! [ -d "$source_path/bsd-user/$targetos" ]; then
error_exit "bsd-user not supported on this host OS" error_exit "bsd-user not supported on this host OS"
fi fi
fi fi
@ -1125,7 +1088,7 @@ exit 0
fi fi
# Remove old dependency files to make sure that they get properly regenerated # Remove old dependency files to make sure that they get properly regenerated
rm -f */config-devices.mak.d rm -f ./*/config-devices.mak.d
if test -z "$python" if test -z "$python"
then then
@ -1143,16 +1106,13 @@ if ! $python -c 'import sys; sys.exit(sys.version_info < (3,6))'; then
"Use --python=/path/to/python to specify a supported Python." "Use --python=/path/to/python to specify a supported Python."
fi fi
# Preserve python version since some functionality is dependent on it
python_version=$($python -c 'import sys; print("%d.%d.%d" % (sys.version_info[0], sys.version_info[1], sys.version_info[2]))' 2>/dev/null)
# Suppress writing compiled files # Suppress writing compiled files
python="$python -B" python="$python -B"
if test -z "$meson"; then if test -z "$meson"; then
if test "$explicit_python" = no && has meson && version_ge "$(meson --version)" 0.59.3; then if test "$explicit_python" = no && has meson && version_ge "$(meson --version)" 0.61.5; then
meson=meson meson=meson
elif test $git_submodules_action != 'ignore' ; then elif test "$git_submodules_action" != 'ignore' ; then
meson=git meson=git
elif test -e "${source_path}/meson/meson.py" ; then elif test -e "${source_path}/meson/meson.py" ; then
meson=internal meson=internal
@ -1289,6 +1249,7 @@ add_to nowarn_flags -Wno-string-plus-int
add_to nowarn_flags -Wno-typedef-redefinition add_to nowarn_flags -Wno-typedef-redefinition
add_to nowarn_flags -Wno-tautological-type-limit-compare add_to nowarn_flags -Wno-tautological-type-limit-compare
add_to nowarn_flags -Wno-psabi add_to nowarn_flags -Wno-psabi
add_to nowarn_flags -Wno-gnu-variable-sized-type-not-at-end
gcc_flags="$warn_flags $nowarn_flags" gcc_flags="$warn_flags $nowarn_flags"
@ -1401,18 +1362,19 @@ EOF
if test "$static" = "yes"; then if test "$static" = "yes"; then
if test "$pie" != "no" && compile_prog "-Werror -fPIE -DPIE" "-static-pie"; then if test "$pie" != "no" && compile_prog "-Werror -fPIE -DPIE" "-static-pie"; then
CONFIGURE_CFLAGS="-fPIE -DPIE $CONFIGURE_CFLAGS" CONFIGURE_CFLAGS="-fPIE -DPIE $CONFIGURE_CFLAGS"
QEMU_LDFLAGS="-static-pie $QEMU_LDFLAGS"
pie="yes" pie="yes"
elif test "$pie" = "yes"; then elif test "$pie" = "yes"; then
error_exit "-static-pie not available due to missing toolchain support" error_exit "-static-pie not available due to missing toolchain support"
else else
QEMU_LDFLAGS="-static $QEMU_LDFLAGS"
pie="no" pie="no"
fi fi
elif test "$pie" = "no"; then elif test "$pie" = "no"; then
if compile_prog "-Werror -fno-pie" "-no-pie"; then if compile_prog "-Werror -fno-pie" "-no-pie"; then
CONFIGURE_CFLAGS="-fno-pie $CONFIGURE_CFLAGS" CONFIGURE_CFLAGS="-fno-pie $CONFIGURE_CFLAGS"
CONFIGURE_LDFLAGS="-no-pie $CONFIGURE_LDFLAGS" CONFIGURE_LDFLAGS="-no-pie $CONFIGURE_LDFLAGS"
# Meson currently only handles pie as a boolean for now so if we have
# explicitly disabled PIE we need to extend our cflags because it wont.
QEMU_CFLAGS="-fno-pie -no-pie $QEMU_CFLAGS"
fi fi
elif compile_prog "-Werror -fPIE -DPIE" "-pie"; then elif compile_prog "-Werror -fPIE -DPIE" "-pie"; then
CONFIGURE_CFLAGS="-fPIE -DPIE $CONFIGURE_CFLAGS" CONFIGURE_CFLAGS="-fPIE -DPIE $CONFIGURE_CFLAGS"
@ -1425,12 +1387,6 @@ else
pie="no" pie="no"
fi fi
# Detect support for PT_GNU_RELRO + DT_BIND_NOW.
# The combination is known as "full relro", because .got.plt is read-only too.
if compile_prog "" "-Wl,-z,relro -Wl,-z,now" ; then
QEMU_LDFLAGS="-Wl,-z,relro -Wl,-z,now $QEMU_LDFLAGS"
fi
########################################## ##########################################
# __sync_fetch_and_and requires at least -march=i486. Many toolchains # __sync_fetch_and_and requires at least -march=i486. Many toolchains
# use i686 as default anyway, but for those that don't, an explicit # use i686 as default anyway, but for those that don't, an explicit
@ -1499,15 +1455,6 @@ if test "$tcg" = "enabled"; then
git_submodules="$git_submodules tests/fp/berkeley-softfloat-3" git_submodules="$git_submodules tests/fp/berkeley-softfloat-3"
fi fi
feature_not_found() {
feature=$1
remedy=$2
error_exit "User requested feature $feature" \
"configure was not able to find it." \
"$remedy"
}
# --- # ---
# big/little endian test # big/little endian test
cat > $TMPC << EOF cat > $TMPC << EOF
@ -1670,7 +1617,7 @@ else
;; ;;
ucontext) ucontext)
if test "$ucontext_works" != "yes"; then if test "$ucontext_works" != "yes"; then
feature_not_found "ucontext" error_exit "'ucontext' backend requested but makecontext not available"
fi fi
;; ;;
sigaltstack) sigaltstack)
@ -1859,23 +1806,18 @@ EOF
fi fi
fi fi
##########################################
# check for slirp
case "$slirp" in
auto | enabled | internal)
# Simpler to always update submodule, even if not needed.
git_submodules="${git_submodules} slirp"
;;
esac
########################################## ##########################################
# functions to probe cross compilers # functions to probe cross compilers
container="no" container="no"
if test $use_containers = "yes"; then if test $use_containers = "yes"; then
if has "docker" || has "podman"; then case $($python "$source_path"/tests/docker/docker.py probe) in
container=$($python $source_path/tests/docker/docker.py probe) *docker) container=docker ;;
podman) container=podman ;;
no) container=no ;;
esac
if test "$container" != "no"; then
docker_py="$python $source_path/tests/docker/docker.py --engine $container"
fi fi
fi fi
@ -2059,7 +2001,7 @@ probe_target_compiler() {
;; ;;
ppc64|ppc64le) ppc64|ppc64le)
container_image=debian-powerpc-test-cross container_image=debian-powerpc-test-cross
container_cross_prefix=powerpc${1#ppc}-linux-gnu- container_cross_prefix=powerpc${target_arch#ppc}-linux-gnu-
container_cross_cc=${container_cross_prefix}gcc-10 container_cross_cc=${container_cross_prefix}gcc-10
;; ;;
riscv64) riscv64)
@ -2107,7 +2049,6 @@ probe_target_compiler() {
: ${container_cross_strip:=${container_cross_prefix}strip} : ${container_cross_strip:=${container_cross_prefix}strip}
done done
local t try
try=cross try=cross
case "$target_arch:$cpu" in case "$target_arch:$cpu" in
aarch64_be:aarch64 | \ aarch64_be:aarch64 | \
@ -2120,8 +2061,8 @@ probe_target_compiler() {
try='native cross' ;; try='native cross' ;;
esac esac
eval "target_cflags=\${cross_cc_cflags_$target_arch}" eval "target_cflags=\${cross_cc_cflags_$target_arch}"
for t in $try; do for thistry in $try; do
case $t in case $thistry in
native) native)
target_cc=$cc target_cc=$cc
target_ccas=$ccas target_ccas=$ccas
@ -2204,7 +2145,6 @@ probe_target_compiler() {
build_static= build_static=
target_cc= target_cc=
target_ccas= target_ccas=
target_cflags=
target_ar= target_ar=
target_as= target_as=
target_ld= target_ld=
@ -2213,54 +2153,54 @@ probe_target_compiler() {
target_ranlib= target_ranlib=
target_strip= target_strip=
fi fi
test -n "$target_cc"
} }
write_target_makefile() { write_target_makefile() {
echo "EXTRA_CFLAGS=$target_cflags" echo "EXTRA_CFLAGS=$target_cflags"
if test -n "$target_cc"; then if test -z "$target_cc" && test -z "$target_as"; then
echo "CC=$target_cc" test -z "$container_image" && error_exit "Internal error: could not find cross compiler for $1?"
echo "CCAS=$target_ccas" echo "$1: docker-image-$container_image" >> Makefile.prereqs
fi if test -n "$container_cross_cc"; then
if test -n "$target_ar"; then echo "CC=$docker_py cc --cc $container_cross_cc -i qemu/$container_image -s $source_path --"
echo "AR=$target_ar" echo "CCAS=$docker_py cc --cc $container_cross_cc -i qemu/$container_image -s $source_path --"
fi fi
if test -n "$target_as"; then echo "AR=$docker_py cc --cc $container_cross_ar -i qemu/$container_image -s $source_path --"
echo "AS=$target_as" echo "AS=$docker_py cc --cc $container_cross_as -i qemu/$container_image -s $source_path --"
fi echo "LD=$docker_py cc --cc $container_cross_ld -i qemu/$container_image -s $source_path --"
if test -n "$target_ld"; then echo "NM=$docker_py cc --cc $container_cross_nm -i qemu/$container_image -s $source_path --"
echo "LD=$target_ld" echo "OBJCOPY=$docker_py cc --cc $container_cross_objcopy -i qemu/$container_image -s $source_path --"
fi echo "RANLIB=$docker_py cc --cc $container_cross_ranlib -i qemu/$container_image -s $source_path --"
if test -n "$target_nm"; then echo "STRIP=$docker_py cc --cc $container_cross_strip -i qemu/$container_image -s $source_path --"
echo "NM=$target_nm" else
fi if test -n "$target_cc"; then
if test -n "$target_objcopy"; then echo "CC=$target_cc"
echo "OBJCOPY=$target_objcopy" echo "CCAS=$target_ccas"
fi fi
if test -n "$target_ranlib"; then if test -n "$target_ar"; then
echo "RANLIB=$target_ranlib" echo "AR=$target_ar"
fi fi
if test -n "$target_strip"; then if test -n "$target_as"; then
echo "STRIP=$target_strip" echo "AS=$target_as"
fi
if test -n "$target_ld"; then
echo "LD=$target_ld"
fi
if test -n "$target_nm"; then
echo "NM=$target_nm"
fi
if test -n "$target_objcopy"; then
echo "OBJCOPY=$target_objcopy"
fi
if test -n "$target_ranlib"; then
echo "RANLIB=$target_ranlib"
fi
if test -n "$target_strip"; then
echo "STRIP=$target_strip"
fi
fi fi
} }
write_container_target_makefile() {
echo "EXTRA_CFLAGS=$target_cflags"
if test -n "$container_cross_cc"; then
echo "CC=\$(DOCKER_SCRIPT) cc --cc $container_cross_cc -i qemu/$container_image -s $source_path --"
echo "CCAS=\$(DOCKER_SCRIPT) cc --cc $container_cross_cc -i qemu/$container_image -s $source_path --"
fi
echo "AR=\$(DOCKER_SCRIPT) cc --cc $container_cross_ar -i qemu/$container_image -s $source_path --"
echo "AS=\$(DOCKER_SCRIPT) cc --cc $container_cross_as -i qemu/$container_image -s $source_path --"
echo "LD=\$(DOCKER_SCRIPT) cc --cc $container_cross_ld -i qemu/$container_image -s $source_path --"
echo "NM=\$(DOCKER_SCRIPT) cc --cc $container_cross_nm -i qemu/$container_image -s $source_path --"
echo "OBJCOPY=\$(DOCKER_SCRIPT) cc --cc $container_cross_objcopy -i qemu/$container_image -s $source_path --"
echo "RANLIB=\$(DOCKER_SCRIPT) cc --cc $container_cross_ranlib -i qemu/$container_image -s $source_path --"
echo "STRIP=\$(DOCKER_SCRIPT) cc --cc $container_cross_strip -i qemu/$container_image -s $source_path --"
}
########################################## ##########################################
# check for vfio_user_server # check for vfio_user_server
@ -2282,13 +2222,6 @@ if test "$fortify_source" = "yes" ; then
QEMU_CFLAGS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 $QEMU_CFLAGS" QEMU_CFLAGS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 $QEMU_CFLAGS"
fi fi
case "$ARCH" in
alpha)
# Ensure there's only a single GP
QEMU_CFLAGS="-msmall-data $QEMU_CFLAGS"
;;
esac
if test "$have_asan" = "yes"; then if test "$have_asan" = "yes"; then
QEMU_CFLAGS="-fsanitize=address $QEMU_CFLAGS" QEMU_CFLAGS="-fsanitize=address $QEMU_CFLAGS"
QEMU_LDFLAGS="-fsanitize=address $QEMU_LDFLAGS" QEMU_LDFLAGS="-fsanitize=address $QEMU_LDFLAGS"
@ -2316,14 +2249,6 @@ if test "$have_ubsan" = "yes"; then
fi fi
########################################## ##########################################
# Exclude --warn-common with TSan to suppress warnings from the TSan libraries.
if test "$solaris" = "no" && test "$tsan" = "no"; then
if $ld --version 2>/dev/null | grep "GNU ld" >/dev/null 2>/dev/null ; then
QEMU_LDFLAGS="-Wl,--warn-common $QEMU_LDFLAGS"
fi
fi
# Guest agent Windows MSI package # Guest agent Windows MSI package
if test "$QEMU_GA_MANUFACTURER" = ""; then if test "$QEMU_GA_MANUFACTURER" = ""; then
@ -2333,7 +2258,7 @@ if test "$QEMU_GA_DISTRO" = ""; then
QEMU_GA_DISTRO=Linux QEMU_GA_DISTRO=Linux
fi fi
if test "$QEMU_GA_VERSION" = ""; then if test "$QEMU_GA_VERSION" = ""; then
QEMU_GA_VERSION=$(cat $source_path/VERSION) QEMU_GA_VERSION=$(cat "$source_path"/VERSION)
fi fi
@ -2349,7 +2274,6 @@ fi
# tests might fail. Prefer to keep the relevant files in their own # tests might fail. Prefer to keep the relevant files in their own
# directory and symlink the directory instead. # directory and symlink the directory instead.
LINKS="Makefile" LINKS="Makefile"
LINKS="$LINKS tests/tcg/Makefile.target"
LINKS="$LINKS pc-bios/optionrom/Makefile" LINKS="$LINKS pc-bios/optionrom/Makefile"
LINKS="$LINKS pc-bios/s390-ccw/Makefile" LINKS="$LINKS pc-bios/s390-ccw/Makefile"
LINKS="$LINKS pc-bios/vof/Makefile" LINKS="$LINKS pc-bios/vof/Makefile"
@ -2360,38 +2284,26 @@ LINKS="$LINKS python"
LINKS="$LINKS contrib/plugins/Makefile " LINKS="$LINKS contrib/plugins/Makefile "
for f in $LINKS ; do for f in $LINKS ; do
if [ -e "$source_path/$f" ]; then if [ -e "$source_path/$f" ]; then
mkdir -p `dirname ./$f` mkdir -p "$(dirname ./"$f")"
symlink "$source_path/$f" "$f" symlink "$source_path/$f" "$f"
fi fi
done done
echo "# Automatically generated by configure - do not modify" > Makefile.prereqs
# Mac OS X ships with a broken assembler # Mac OS X ships with a broken assembler
roms= roms=
probe_target_compiler i386-softmmu if test "$targetos" != "darwin" && test "$targetos" != "sunos" && \
if test -n "$target_cc" && test "$targetos" != "haiku" && test "$softmmu" = yes && \
test "$targetos" != "darwin" && test "$targetos" != "sunos" && \ probe_target_compiler i386-softmmu; then
test "$targetos" != "haiku" && test "$softmmu" = yes ; then roms="pc-bios/optionrom"
# Different host OS linkers have different ideas about the name of the ELF config_mak=pc-bios/optionrom/config.mak
# emulation. Linux and OpenBSD/amd64 use 'elf_i386'; FreeBSD uses the _fbsd echo "# Automatically generated by configure - do not modify" > $config_mak
# variant; OpenBSD/i386 uses the _obsd variant; and Windows uses i386pe. echo "TOPSRC_DIR=$source_path" >> $config_mak
for emu in elf_i386 elf_i386_fbsd elf_i386_obsd i386pe; do write_target_makefile >> $config_mak
if "$target_ld" -verbose 2>&1 | grep -q "^[[:space:]]*$emu[[:space:]]*$"; then
ld_i386_emulation="$emu"
break
fi
done
if test -n "$ld_i386_emulation"; then
roms="pc-bios/optionrom"
config_mak=pc-bios/optionrom/config.mak
echo "# Automatically generated by configure - do not modify" > $config_mak
echo "TOPSRC_DIR=$source_path" >> $config_mak
echo "LD_I386_EMULATION=$ld_i386_emulation" >> $config_mak
write_target_makefile >> $config_mak
fi
fi fi
probe_target_compiler ppc-softmmu if test "$softmmu" = yes && probe_target_compiler ppc-softmmu; then
if test -n "$target_cc" && test "$softmmu" = yes; then
roms="$roms pc-bios/vof" roms="$roms pc-bios/vof"
config_mak=pc-bios/vof/config.mak config_mak=pc-bios/vof/config.mak
echo "# Automatically generated by configure - do not modify" > $config_mak echo "# Automatically generated by configure - do not modify" > $config_mak
@ -2401,8 +2313,7 @@ fi
# Only build s390-ccw bios if the compiler has -march=z900 or -march=z10 # Only build s390-ccw bios if the compiler has -march=z900 or -march=z10
# (which is the lowest architecture level that Clang supports) # (which is the lowest architecture level that Clang supports)
probe_target_compiler s390x-softmmu if test "$softmmu" = yes && probe_target_compiler s390x-softmmu; then
if test -n "$target_cc" && test "$softmmu" = yes; then
write_c_skeleton write_c_skeleton
do_compiler "$target_cc" $target_cc_cflags -march=z900 -o $TMPO -c $TMPC do_compiler "$target_cc" $target_cc_cflags -march=z900 -o $TMPO -c $TMPC
has_z900=$? has_z900=$?
@ -2425,39 +2336,7 @@ fi
####################################### #######################################
# generate config-host.mak # generate config-host.mak
# Check that the C++ compiler exists and works with the C compiler. if ! (GIT="$git" "$source_path/scripts/git-submodule.sh" "$git_submodules_action" "$git_submodules"); then
# All the QEMU_CXXFLAGS are based on QEMU_CFLAGS. Keep this at the end to don't miss any other that could be added.
if has $cxx; then
cat > $TMPC <<EOF
int c_function(void);
int main(void) { return c_function(); }
EOF
compile_object
cat > $TMPCXX <<EOF
extern "C" {
int c_function(void);
}
int c_function(void) { return 42; }
EOF
update_cxxflags
if do_cxx $CXXFLAGS $EXTRA_CXXFLAGS $CONFIGURE_CXXFLAGS $QEMU_CXXFLAGS -o $TMPE $TMPCXX $TMPO $QEMU_LDFLAGS; then
# C++ compiler $cxx works ok with C compiler $cc
:
else
echo "C++ compiler $cxx does not work with C compiler $cc"
echo "Disabling C++ specific optional code"
cxx=
fi
else
echo "No C++ compiler available; disabling C++ specific optional code"
cxx=
fi
if !(GIT="$git" "$source_path/scripts/git-submodule.sh" "$git_submodules_action" "$git_submodules"); then
exit 1 exit 1
fi fi
@ -2526,9 +2405,15 @@ if test -n "$gdb_bin"; then
gdb_version=$($gdb_bin --version | head -n 1) gdb_version=$($gdb_bin --version | head -n 1)
if version_ge ${gdb_version##* } 9.1; then if version_ge ${gdb_version##* } 9.1; then
echo "HAVE_GDB_BIN=$gdb_bin" >> $config_host_mak echo "HAVE_GDB_BIN=$gdb_bin" >> $config_host_mak
else
gdb_bin=""
fi fi
fi fi
if test "$container" != no; then
echo "ENGINE=$container" >> $config_host_mak
fi
if test "$as_shared_lib" = "yes" ; then if test "$as_shared_lib" = "yes" ; then
echo "AS_SHARED_LIB=y" >> $config_host_mak echo "AS_SHARED_LIB=y" >> $config_host_mak
fi fi
@ -2544,7 +2429,6 @@ echo "MESON=$meson" >> $config_host_mak
echo "NINJA=$ninja" >> $config_host_mak echo "NINJA=$ninja" >> $config_host_mak
echo "CC=$cc" >> $config_host_mak echo "CC=$cc" >> $config_host_mak
echo "QEMU_CFLAGS=$QEMU_CFLAGS" >> $config_host_mak echo "QEMU_CFLAGS=$QEMU_CFLAGS" >> $config_host_mak
echo "QEMU_CXXFLAGS=$QEMU_CXXFLAGS" >> $config_host_mak
echo "QEMU_OBJCFLAGS=$QEMU_OBJCFLAGS" >> $config_host_mak echo "QEMU_OBJCFLAGS=$QEMU_OBJCFLAGS" >> $config_host_mak
echo "GLIB_CFLAGS=$glib_cflags" >> $config_host_mak echo "GLIB_CFLAGS=$glib_cflags" >> $config_host_mak
echo "GLIB_LIBS=$glib_libs" >> $config_host_mak echo "GLIB_LIBS=$glib_libs" >> $config_host_mak
@ -2589,7 +2473,7 @@ fi
for target in $target_list; do for target in $target_list; do
target_dir="$target" target_dir="$target"
target_name=$(echo $target | cut -d '-' -f 1)$EXESUF target_name=$(echo $target | cut -d '-' -f 1)$EXESUF
mkdir -p $target_dir mkdir -p "$target_dir"
case $target in case $target in
*-user) symlink "../qemu-$target_name" "$target_dir/qemu-$target_name" ;; *-user) symlink "../qemu-$target_name" "$target_dir/qemu-$target_name" ;;
*) symlink "../qemu-system-$target_name" "$target_dir/qemu-system-$target_name" ;; *) symlink "../qemu-system-$target_name" "$target_dir/qemu-system-$target_name" ;;
@ -2609,29 +2493,28 @@ if test "$safe_stack" = "yes"; then
fi fi
# tests/tcg configuration # tests/tcg configuration
(makefile=tests/tcg/Makefile.prereqs (config_host_mak=tests/tcg/config-host.mak
echo "# Automatically generated by configure - do not modify" > $makefile mkdir -p tests/tcg
config_host_mak=tests/tcg/config-host.mak
echo "# Automatically generated by configure - do not modify" > $config_host_mak echo "# Automatically generated by configure - do not modify" > $config_host_mak
echo "SRC_PATH=$source_path" >> $config_host_mak echo "SRC_PATH=$source_path" >> $config_host_mak
echo "HOST_CC=$host_cc" >> $config_host_mak echo "HOST_CC=$host_cc" >> $config_host_mak
# versioned checked in the main config_host.mak above
if test -n "$gdb_bin"; then
echo "HAVE_GDB_BIN=$gdb_bin" >> $config_host_mak
fi
tcg_tests_targets= tcg_tests_targets=
for target in $target_list; do for target in $target_list; do
arch=${target%%-*} arch=${target%%-*}
config_target_mak=tests/tcg/config-$target.mak
echo "# Automatically generated by configure - do not modify" > $config_target_mak
echo "TARGET_NAME=$arch" >> $config_target_mak
case $target in case $target in
xtensa*-linux-user) xtensa*-linux-user)
# the toolchain is not complete with headers, only build softmmu tests # the toolchain is not complete with headers, only build softmmu tests
continue continue
;; ;;
*-softmmu) *-softmmu)
test -f $source_path/tests/tcg/$arch/Makefile.softmmu-target || continue test -f "$source_path/tests/tcg/$arch/Makefile.softmmu-target" || continue
qemu="qemu-system-$arch" qemu="qemu-system-$arch"
;; ;;
*-linux-user|*-bsd-user) *-linux-user|*-bsd-user)
@ -2639,85 +2522,22 @@ for target in $target_list; do
;; ;;
esac esac
probe_target_compiler $target if probe_target_compiler $target || test -n "$container_image"; then
if test $got_cross_cc = yes; then test -n "$container_image" && build_static=y
# Test for compiler features for optional tests. We only do this mkdir -p "tests/tcg/$target"
# for cross compilers because ensuring the docker containers based config_target_mak=tests/tcg/$target/config-target.mak
# compilers is a requirememt for adding a new test that needs a ln -sf "$source_path/tests/tcg/Makefile.target" "tests/tcg/$target/Makefile"
# compiler feature. echo "# Automatically generated by configure - do not modify" > "$config_target_mak"
echo "TARGET_NAME=$arch" >> "$config_target_mak"
echo "BUILD_STATIC=$build_static" >> $config_target_mak echo "TARGET=$target" >> "$config_target_mak"
write_target_makefile >> $config_target_mak write_target_makefile "build-tcg-tests-$target" >> "$config_target_mak"
case $target in echo "BUILD_STATIC=$build_static" >> "$config_target_mak"
aarch64-*) echo "QEMU=$PWD/$qemu" >> "$config_target_mak"
if do_compiler "$target_cc" $target_cflags \ echo "run-tcg-tests-$target: $qemu\$(EXESUF)" >> Makefile.prereqs
-march=armv8.1-a+sve -o $TMPE $TMPC; then
echo "CROSS_CC_HAS_SVE=y" >> $config_target_mak
fi
if do_compiler "$target_cc" $target_cflags \
-march=armv8.1-a+sve2 -o $TMPE $TMPC; then
echo "CROSS_CC_HAS_SVE2=y" >> $config_target_mak
fi
if do_compiler "$target_cc" $target_cflags \
-march=armv8.3-a -o $TMPE $TMPC; then
echo "CROSS_CC_HAS_ARMV8_3=y" >> $config_target_mak
fi
if do_compiler "$target_cc" $target_cflags \
-mbranch-protection=standard -o $TMPE $TMPC; then
echo "CROSS_CC_HAS_ARMV8_BTI=y" >> $config_target_mak
fi
if do_compiler "$target_cc" $target_cflags \
-march=armv8.5-a+memtag -o $TMPE $TMPC; then
echo "CROSS_CC_HAS_ARMV8_MTE=y" >> $config_target_mak
fi
;;
ppc*)
if do_compiler "$target_cc" $target_cflags \
-mpower8-vector -o $TMPE $TMPC; then
echo "CROSS_CC_HAS_POWER8_VECTOR=y" >> $config_target_mak
fi
if do_compiler "$target_cc" $target_cflags \
-mpower10 -o $TMPE $TMPC; then
echo "CROSS_CC_HAS_POWER10=y" >> $config_target_mak
fi
;;
i386-linux-user)
if do_compiler "$target_cc" $target_cflags \
-Werror -fno-pie -o $TMPE $TMPC; then
echo "CROSS_CC_HAS_I386_NOPIE=y" >> $config_target_mak
fi
;;
esac
elif test -n "$container_image"; then
echo "build-tcg-tests-$target: docker-image-$container_image" >> $makefile
echo "BUILD_STATIC=y" >> $config_target_mak
write_container_target_makefile >> $config_target_mak
case $target in
aarch64-*)
echo "CROSS_CC_HAS_SVE=y" >> $config_target_mak
echo "CROSS_CC_HAS_SVE2=y" >> $config_target_mak
echo "CROSS_CC_HAS_ARMV8_3=y" >> $config_target_mak
echo "CROSS_CC_HAS_ARMV8_BTI=y" >> $config_target_mak
echo "CROSS_CC_HAS_ARMV8_MTE=y" >> $config_target_mak
;;
ppc*)
echo "CROSS_CC_HAS_POWER8_VECTOR=y" >> $config_target_mak
echo "CROSS_CC_HAS_POWER10=y" >> $config_target_mak
;;
i386-linux-user)
echo "CROSS_CC_HAS_I386_NOPIE=y" >> $config_target_mak
;;
esac
got_cross_cc=yes
fi
if test $got_cross_cc = yes; then
mkdir -p tests/tcg/$target
echo "QEMU=$PWD/$qemu" >> $config_target_mak
echo "run-tcg-tests-$target: $qemu\$(EXESUF)" >> $makefile
tcg_tests_targets="$tcg_tests_targets $target" tcg_tests_targets="$tcg_tests_targets $target"
fi fi
done done
echo "TCG_TESTS_TARGETS=$tcg_tests_targets" >> $makefile) echo "TCG_TESTS_TARGETS=$tcg_tests_targets" >> config-host.mak)
if test "$skip_meson" = no; then if test "$skip_meson" = no; then
cross="config-meson.cross.new" cross="config-meson.cross.new"
@ -2735,7 +2555,6 @@ if test "$skip_meson" = no; then
echo "${a}-softmmu = '$c'" >> $cross echo "${a}-softmmu = '$c'" >> $cross
done done
test -z "$cxx" && echo "link_language = 'c'" >> $cross
echo "[built-in options]" >> $cross echo "[built-in options]" >> $cross
echo "c_args = [$(meson_quote $CFLAGS $EXTRA_CFLAGS)]" >> $cross echo "c_args = [$(meson_quote $CFLAGS $EXTRA_CFLAGS)]" >> $cross
echo "cpp_args = [$(meson_quote $CXXFLAGS $EXTRA_CXXFLAGS)]" >> $cross echo "cpp_args = [$(meson_quote $CXXFLAGS $EXTRA_CXXFLAGS)]" >> $cross
@ -2792,7 +2611,6 @@ if test "$skip_meson" = no; then
test "$fdt" != auto && meson_option_add "-Dfdt=$fdt" test "$fdt" != auto && meson_option_add "-Dfdt=$fdt"
test -n "${LIB_FUZZING_ENGINE+xxx}" && meson_option_add "-Dfuzzing_engine=$LIB_FUZZING_ENGINE" test -n "${LIB_FUZZING_ENGINE+xxx}" && meson_option_add "-Dfuzzing_engine=$LIB_FUZZING_ENGINE"
test "$qemu_suffix" != qemu && meson_option_add "-Dqemu_suffix=$qemu_suffix" test "$qemu_suffix" != qemu && meson_option_add "-Dqemu_suffix=$qemu_suffix"
test "$slirp" != auto && meson_option_add "-Dslirp=$slirp"
test "$smbd" != '' && meson_option_add "-Dsmbd=$smbd" test "$smbd" != '' && meson_option_add "-Dsmbd=$smbd"
test "$tcg" != enabled && meson_option_add "-Dtcg=$tcg" test "$tcg" != enabled && meson_option_add "-Dtcg=$tcg"
test "$vfio_user_server" != auto && meson_option_add "-Dvfio_user_server=$vfio_user_server" test "$vfio_user_server" != auto && meson_option_add "-Dvfio_user_server=$vfio_user_server"

View File

@ -10,6 +10,7 @@ bytedance.com ByteDance
cmss.chinamobile.com China Mobile cmss.chinamobile.com China Mobile
citrix.com Citrix citrix.com Citrix
crudebyte.com Crudebyte crudebyte.com Crudebyte
chinatelecom.cn China Telecom
eldorado.org.br Instituto de Pesquisas Eldorado eldorado.org.br Instituto de Pesquisas Eldorado
fujitsu.com Fujitsu fujitsu.com Fujitsu
google.com Google google.com Google
@ -19,6 +20,7 @@ ibm.com IBM
igalia.com Igalia igalia.com Igalia
intel.com Intel intel.com Intel
linaro.org Linaro linaro.org Linaro
loongson.cn Loongson Technology
lwn.net LWN lwn.net LWN
microsoft.com Microsoft microsoft.com Microsoft
mvista.com MontaVista mvista.com MontaVista

View File

@ -19,3 +19,9 @@ edu.cn
# Boston University # Boston University
bu.edu bu.edu
# Institute of Software Chinese Academy of Sciences
iscas.ac.cn
# Université Grenoble Alpes
univ-grenoble-alpes.fr

View File

@ -34,3 +34,6 @@ bmeng.cn@gmail.com
liq3ea@gmail.com liq3ea@gmail.com
chetan4windows@gmail.com chetan4windows@gmail.com
akihiko.odaki@gmail.com akihiko.odaki@gmail.com
paul@nowt.org
git@xen0n.name
simon@simonsafar.com

View File

@ -38,7 +38,7 @@ enum EvictionPolicy policy;
* put in any of the blocks inside the set. The number of block per set is * put in any of the blocks inside the set. The number of block per set is
* called the associativity (assoc). * called the associativity (assoc).
* *
* Each block contains the the stored tag and a valid bit. Since this is not * Each block contains the stored tag and a valid bit. Since this is not
* a functional simulator, the data itself is not stored. We only identify * a functional simulator, the data itself is not stored. We only identify
* whether a block is in the cache or not by searching for its tag. * whether a block is in the cache or not by searching for its tag.
* *

View File

@ -20,6 +20,9 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
/* Store last executed instruction on each vCPU as a GString */ /* Store last executed instruction on each vCPU as a GString */
GArray *last_exec; GArray *last_exec;
static GPtrArray *imatches;
static GArray *amatches;
/** /**
* Add memory read or write information to current instruction log * Add memory read or write information to current instruction log
*/ */
@ -85,12 +88,13 @@ static void vcpu_insn_exec(unsigned int cpu_index, void *udata)
static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{ {
struct qemu_plugin_insn *insn; struct qemu_plugin_insn *insn;
uint64_t insn_vaddr; bool skip = (imatches || amatches);
uint32_t insn_opcode;
char *insn_disas;
size_t n = qemu_plugin_tb_n_insns(tb); size_t n = qemu_plugin_tb_n_insns(tb);
for (size_t i = 0; i < n; i++) { for (size_t i = 0; i < n; i++) {
char *insn_disas;
uint64_t insn_vaddr;
/* /*
* `insn` is shared between translations in QEMU, copy needed data here. * `insn` is shared between translations in QEMU, copy needed data here.
* `output` is never freed as it might be used multiple times during * `output` is never freed as it might be used multiple times during
@ -99,20 +103,55 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
* a limitation for CISC architectures. * a limitation for CISC architectures.
*/ */
insn = qemu_plugin_tb_get_insn(tb, i); insn = qemu_plugin_tb_get_insn(tb, i);
insn_vaddr = qemu_plugin_insn_vaddr(insn);
insn_opcode = *((uint32_t *)qemu_plugin_insn_data(insn));
insn_disas = qemu_plugin_insn_disas(insn); insn_disas = qemu_plugin_insn_disas(insn);
char *output = g_strdup_printf("0x%"PRIx64", 0x%"PRIx32", \"%s\"", insn_vaddr = qemu_plugin_insn_vaddr(insn);
insn_vaddr, insn_opcode, insn_disas);
/* Register callback on memory read or write */ /*
qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem, * If we are filtering we better check out if we have any
QEMU_PLUGIN_CB_NO_REGS, * hits. The skip "latches" so we can track memory accesses
QEMU_PLUGIN_MEM_RW, NULL); * after the instruction we care about.
*/
if (skip && imatches) {
int j;
for (j = 0; j < imatches->len && skip; j++) {
char *m = g_ptr_array_index(imatches, j);
if (g_str_has_prefix(insn_disas, m)) {
skip = false;
}
}
}
if (skip && amatches) {
int j;
for (j = 0; j < amatches->len && skip; j++) {
uint64_t v = g_array_index(amatches, uint64_t, j);
if (v == insn_vaddr) {
skip = false;
}
}
}
if (skip) {
g_free(insn_disas);
} else {
uint32_t insn_opcode;
insn_opcode = *((uint32_t *)qemu_plugin_insn_data(insn));
char *output = g_strdup_printf("0x%"PRIx64", 0x%"PRIx32", \"%s\"",
insn_vaddr, insn_opcode, insn_disas);
/* Register callback on memory read or write */
qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem,
QEMU_PLUGIN_CB_NO_REGS,
QEMU_PLUGIN_MEM_RW, NULL);
/* Register callback on instruction */
qemu_plugin_register_vcpu_insn_exec_cb(insn, vcpu_insn_exec,
QEMU_PLUGIN_CB_NO_REGS, output);
/* reset skip */
skip = (imatches || amatches);
}
/* Register callback on instruction */
qemu_plugin_register_vcpu_insn_exec_cb(insn, vcpu_insn_exec,
QEMU_PLUGIN_CB_NO_REGS, output);
} }
} }
@ -132,6 +171,25 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
} }
} }
/* Add a match to the array of matches */
static void parse_insn_match(char *match)
{
if (!imatches) {
imatches = g_ptr_array_new();
}
g_ptr_array_add(imatches, match);
}
static void parse_vaddr_match(char *match)
{
uint64_t v = g_ascii_strtoull(match, NULL, 16);
if (!amatches) {
amatches = g_array_new(false, true, sizeof(uint64_t));
}
g_array_append_val(amatches, v);
}
/** /**
* Install the plugin * Install the plugin
*/ */
@ -145,6 +203,19 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
*/ */
last_exec = g_array_new(FALSE, FALSE, sizeof(GString *)); last_exec = g_array_new(FALSE, FALSE, sizeof(GString *));
for (int i = 0; i < argc; i++) {
char *opt = argv[i];
g_autofree char **tokens = g_strsplit(opt, "=", 2);
if (g_strcmp0(tokens[0], "ifilter") == 0) {
parse_insn_match(tokens[1]);
} else if (g_strcmp0(tokens[0], "afilter") == 0) {
parse_vaddr_match(tokens[1]);
} else {
fprintf(stderr, "option parsing failed: %s\n", opt);
return -1;
}
}
/* Register translation block and exit callbacks */ /* Register translation block and exit callbacks */
qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);

View File

@ -106,10 +106,7 @@ static void vub_req_complete(VubReq *req)
req->size + 1); req->size + 1);
vu_queue_notify(vu_dev, req->vq); vu_queue_notify(vu_dev, req->vq);
if (req->elem) { g_free(req->elem);
free(req->elem);
}
g_free(req); g_free(req);
} }
@ -243,7 +240,7 @@ static int vub_virtio_process_req(VubDev *vdev_blk,
/* refer to hw/block/virtio_blk.c */ /* refer to hw/block/virtio_blk.c */
if (elem->out_num < 1 || elem->in_num < 1) { if (elem->out_num < 1 || elem->in_num < 1) {
fprintf(stderr, "virtio-blk request missing headers\n"); fprintf(stderr, "virtio-blk request missing headers\n");
free(elem); g_free(elem);
return -1; return -1;
} }
@ -325,7 +322,7 @@ static int vub_virtio_process_req(VubDev *vdev_blk,
return 0; return 0;
err: err:
free(elem); g_free(elem);
g_free(req); g_free(req);
return -1; return -1;
} }

Some files were not shown because too many files have changed in this diff Show More