Merge and port to QEMU 8

Andrea Fioraldi 2023-06-01 17:01:39 +02:00
commit 381b916b20
2088 changed files with 117360 additions and 53417 deletions

View File

@ -1,109 +0,0 @@
env:
CIRRUS_CLONE_DEPTH: 1
windows_msys2_task:
timeout_in: 90m
windows_container:
image: cirrusci/windowsservercore:2019
os_version: 2019
cpu: 8
memory: 8G
env:
CIRRUS_SHELL: powershell
MSYS: winsymlinks:native
MSYSTEM: MINGW64
MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe
MSYS2_FINGERPRINT: 0
MSYS2_PACKAGES: "
diffutils git grep make pkg-config sed
mingw-w64-x86_64-python
mingw-w64-x86_64-python-sphinx
mingw-w64-x86_64-toolchain
mingw-w64-x86_64-SDL2
mingw-w64-x86_64-SDL2_image
mingw-w64-x86_64-gtk3
mingw-w64-x86_64-glib2
mingw-w64-x86_64-ninja
mingw-w64-x86_64-jemalloc
mingw-w64-x86_64-lzo2
mingw-w64-x86_64-zstd
mingw-w64-x86_64-libjpeg-turbo
mingw-w64-x86_64-pixman
mingw-w64-x86_64-libgcrypt
mingw-w64-x86_64-libpng
mingw-w64-x86_64-libssh
mingw-w64-x86_64-snappy
mingw-w64-x86_64-libusb
mingw-w64-x86_64-usbredir
mingw-w64-x86_64-libtasn1
mingw-w64-x86_64-nettle
mingw-w64-x86_64-cyrus-sasl
mingw-w64-x86_64-curl
mingw-w64-x86_64-gnutls
mingw-w64-x86_64-libnfs
"
CHERE_INVOKING: 1
msys2_cache:
folder: C:\tools\archive
reupload_on_changes: false
# These env variables are used to generate the fingerprint that triggers the cache procedure
# If you want to force re-population of msys2, increase MSYS2_FINGERPRINT
fingerprint_script:
- |
echo $env:CIRRUS_TASK_NAME
echo $env:MSYS2_URL
echo $env:MSYS2_FINGERPRINT
echo $env:MSYS2_PACKAGES
populate_script:
- |
md -Force C:\tools\archive\pkg
$start_time = Get-Date
bitsadmin /transfer msys_download /dynamic /download /priority FOREGROUND $env:MSYS2_URL C:\tools\archive\base.exe
Write-Output "Download time taken: $((Get-Date).Subtract($start_time))"
cd C:\tools
C:\tools\archive\base.exe -y
del -Force C:\tools\archive\base.exe
Write-Output "Base install time taken: $((Get-Date).Subtract($start_time))"
$start_time = Get-Date
((Get-Content -path C:\tools\msys64\etc\\post-install\\07-pacman-key.post -Raw) -replace '--refresh-keys', '--version') | Set-Content -Path C:\tools\msys64\etc\\post-install\\07-pacman-key.post
C:\tools\msys64\usr\bin\bash.exe -lc "sed -i 's/^CheckSpace/#CheckSpace/g' /etc/pacman.conf"
C:\tools\msys64\usr\bin\bash.exe -lc "export"
C:\tools\msys64\usr\bin\pacman.exe --noconfirm -Sy
echo Y | C:\tools\msys64\usr\bin\pacman.exe --noconfirm -Suu --overwrite=*
taskkill /F /FI "MODULES eq msys-2.0.dll"
tasklist
C:\tools\msys64\usr\bin\bash.exe -lc "mv -f /etc/pacman.conf.pacnew /etc/pacman.conf || true"
C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -Syuu --overwrite=*"
Write-Output "Core install time taken: $((Get-Date).Subtract($start_time))"
$start_time = Get-Date
C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -S --needed $env:MSYS2_PACKAGES"
Write-Output "Package install time taken: $((Get-Date).Subtract($start_time))"
$start_time = Get-Date
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\etc\mtab
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\fd
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stderr
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stdin
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stdout
del -Force -Recurse -ErrorAction SilentlyContinue C:\tools\msys64\var\cache\pacman\pkg
tar cf C:\tools\archive\msys64.tar -C C:\tools\ msys64
Write-Output "Package archive time taken: $((Get-Date).Subtract($start_time))"
del -Force -Recurse -ErrorAction SilentlyContinue c:\tools\msys64
install_script:
- |
$start_time = Get-Date
cd C:\tools
ls C:\tools\archive\msys64.tar
tar xf C:\tools\archive\msys64.tar
Write-Output "Extract msys2 time taken: $((Get-Date).Subtract($start_time))"
script:
- C:\tools\msys64\usr\bin\bash.exe -lc "mkdir build"
- C:\tools\msys64\usr\bin\bash.exe -lc "cd build && ../configure --python=python3"
- C:\tools\msys64\usr\bin\bash.exe -lc "cd build && make -j8"
- exit $LastExitCode
test_script:
- C:\tools\msys64\usr\bin\bash.exe -lc "cd build && make V=1 check"
- exit $LastExitCode

.git-blame-ignore-revs Normal file
View File

@ -0,0 +1,21 @@
#
# List of code-formatting clean ups the git blame can ignore
#
# git blame --ignore-revs-file .git-blame-ignore-revs
#
# or
#
# git config blame.ignoreRevsFile .git-blame-ignore-revs
#
# gdbstub: clean-up indents
ad9e4585b3c7425759d3eea697afbca71d2c2082
# e1000e: fix code style
0eadd56bf53ab196a16d492d7dd31c62e1c24c32
# target/riscv: coding style fixes
8c7feddddd9218b407792120bcfda0347ed16205
# replace TABs with spaces
48805df9c22a0700fba4b3b548fafaa21726ca68

.gitignore vendored
View File

@ -22,3 +22,5 @@ GTAGS
*.swp
*.patch
*.gcov
/subprojects/slirp

View File

@ -75,5 +75,5 @@
- if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: manual
# Jobs can run if any jobs they depend on were successfull
# Jobs can run if any jobs they depend on were successful
- when: on_success

View File

@ -5,19 +5,15 @@
before_script:
- JOBS=$(expr $(nproc) + 1)
script:
- if test -n "$LD_JOBS";
then
scripts/git-submodule.sh update meson ;
fi
- mkdir build
- cd build
- ../configure --enable-werror --disable-docs --enable-fdt=system
${LD_JOBS:+--meson=git} ${TARGETS:+--target-list="$TARGETS"}
${TARGETS:+--target-list="$TARGETS"}
$CONFIGURE_ARGS ||
{ cat config.log meson-logs/meson-log.txt && exit 1; }
- if test -n "$LD_JOBS";
then
../meson/meson.py configure . -Dbackend_max_links="$LD_JOBS" ;
pyvenv/bin/meson configure . -Dbackend_max_links="$LD_JOBS" ;
fi || exit 1;
- make -j"$JOBS"
- if test -n "$MAKE_CHECK_ARGS";
@ -25,6 +21,22 @@
make -j"$JOBS" $MAKE_CHECK_ARGS ;
fi
# We jump through some hoops in common_test_job_template to avoid
# rebuilding all the object files we skip in the artifacts
.native_build_artifact_template:
artifacts:
expire_in: 2 days
paths:
- build
- .git-submodule-status
exclude:
- build/**/*.p
- build/**/*.a.p
- build/**/*.fa.p
- build/**/*.c.o
- build/**/*.c.o.d
- build/**/*.fa
.common_test_job_template:
extends: .base_job_template
stage: test

View File

@ -2,20 +2,16 @@ include:
- local: '/.gitlab-ci.d/buildtest-template.yml'
build-system-alpine:
extends: .native_build_job_template
extends:
- .native_build_job_template
- .native_build_artifact_template
needs:
- job: amd64-alpine-container
variables:
IMAGE: alpine
TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
microblazeel-softmmu mips64el-softmmu
TARGETS: avr-softmmu loongarch64-softmmu mips64-softmmu mipsel-softmmu
MAKE_CHECK_ARGS: check-build
CONFIGURE_ARGS: --enable-docs --enable-trace-backends=log,simple,syslog
artifacts:
expire_in: 2 days
paths:
- .git-submodule-status
- build
check-system-alpine:
extends: .native_test_job_template
@ -36,19 +32,17 @@ avocado-system-alpine:
MAKE_CHECK_ARGS: check-avocado
build-system-ubuntu:
extends: .native_build_job_template
extends:
- .native_build_job_template
- .native_build_artifact_template
needs:
job: amd64-ubuntu2004-container
job: amd64-ubuntu2204-container
variables:
IMAGE: ubuntu2004
IMAGE: ubuntu2204
CONFIGURE_ARGS: --enable-docs
TARGETS: alpha-softmmu cris-softmmu hppa-softmmu
microblazeel-softmmu mips64el-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-ubuntu:
extends: .native_test_job_template
@ -56,7 +50,7 @@ check-system-ubuntu:
- job: build-system-ubuntu
artifacts: true
variables:
IMAGE: ubuntu2004
IMAGE: ubuntu2204
MAKE_CHECK_ARGS: check
avocado-system-ubuntu:
@ -65,23 +59,21 @@ avocado-system-ubuntu:
- job: build-system-ubuntu
artifacts: true
variables:
IMAGE: ubuntu2004
IMAGE: ubuntu2204
MAKE_CHECK_ARGS: check-avocado
build-system-debian:
extends: .native_build_job_template
extends:
- .native_build_job_template
- .native_build_artifact_template
needs:
job: amd64-debian-container
variables:
IMAGE: debian-amd64
CONFIGURE_ARGS: --with-coroutine=sigaltstack
TARGETS: arm-softmmu avr-softmmu i386-softmmu mipsel-softmmu
riscv64-softmmu sh4eb-softmmu sparc-softmmu xtensaeb-softmmu
TARGETS: arm-softmmu i386-softmmu riscv64-softmmu sh4eb-softmmu
sparc-softmmu xtensaeb-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-debian:
extends: .native_test_job_template
@ -110,11 +102,13 @@ crash-test-debian:
IMAGE: debian-amd64
script:
- cd build
- make check-venv
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-i386
- make NINJA=":" check-venv
- pyvenv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386
build-system-fedora:
extends: .native_build_job_template
extends:
- .native_build_job_template
- .native_build_artifact_template
needs:
job: amd64-fedora-container
variables:
@ -123,10 +117,6 @@ build-system-fedora:
TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu
xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-fedora:
extends: .native_test_job_template
@ -155,12 +145,14 @@ crash-test-fedora:
IMAGE: fedora
script:
- cd build
- make check-venv
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
- make NINJA=":" check-venv
- pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
- pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
build-system-centos:
extends: .native_build_job_template
extends:
- .native_build_job_template
- .native_build_artifact_template
needs:
job: amd64-centos8-container
variables:
@ -170,10 +162,6 @@ build-system-centos:
TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu
x86_64-softmmu rx-softmmu sh4-softmmu nios2-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-centos:
extends: .native_test_job_template
@ -194,17 +182,15 @@ avocado-system-centos:
MAKE_CHECK_ARGS: check-avocado
build-system-opensuse:
extends: .native_build_job_template
extends:
- .native_build_job_template
- .native_build_artifact_template
needs:
job: amd64-opensuse-leap-container
variables:
IMAGE: opensuse-leap
TARGETS: s390x-softmmu x86_64-softmmu aarch64-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-opensuse:
extends: .native_test_job_template
@ -339,7 +325,9 @@ clang-user:
# Split in three sets of build/check/avocado to limit the execution time of each
# job
build-cfi-aarch64:
extends: .native_build_job_template
extends:
- .native_build_job_template
- .native_build_artifact_template
needs:
- job: amd64-fedora-container
variables:
@ -355,10 +343,6 @@ build-cfi-aarch64:
# skipped until the situation has been solved.
QEMU_JOB_SKIPPED: 1
timeout: 90m
artifacts:
expire_in: 2 days
paths:
- build
check-cfi-aarch64:
extends: .native_test_job_template
@ -379,7 +363,9 @@ avocado-cfi-aarch64:
MAKE_CHECK_ARGS: check-avocado
build-cfi-ppc64-s390x:
extends: .native_build_job_template
extends:
- .native_build_job_template
- .native_build_artifact_template
needs:
- job: amd64-fedora-container
variables:
@ -395,10 +381,6 @@ build-cfi-ppc64-s390x:
# skipped until the situation has been solved.
QEMU_JOB_SKIPPED: 1
timeout: 80m
artifacts:
expire_in: 2 days
paths:
- build
check-cfi-ppc64-s390x:
extends: .native_test_job_template
@ -419,7 +401,9 @@ avocado-cfi-ppc64-s390x:
MAKE_CHECK_ARGS: check-avocado
build-cfi-x86_64:
extends: .native_build_job_template
extends:
- .native_build_job_template
- .native_build_artifact_template
needs:
- job: amd64-fedora-container
variables:
@ -431,10 +415,6 @@ build-cfi-x86_64:
TARGETS: x86_64-softmmu
MAKE_CHECK_ARGS: check-build
timeout: 70m
artifacts:
expire_in: 2 days
paths:
- build
check-cfi-x86_64:
extends: .native_test_job_template
@ -457,22 +437,21 @@ avocado-cfi-x86_64:
tsan-build:
extends: .native_build_job_template
needs:
job: amd64-ubuntu2004-container
job: amd64-ubuntu2204-container
variables:
IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-tsan --cc=clang-10 --cxx=clang++-10
IMAGE: ubuntu2204
CONFIGURE_ARGS: --enable-tsan --cc=clang --cxx=clang++
--enable-trace-backends=ust --disable-slirp
TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user
MAKE_CHECK_ARGS: bench V=1
# gcov is a GCC feature
gcov:
extends: .native_build_job_template
needs:
job: amd64-ubuntu2004-container
job: amd64-ubuntu2204-container
timeout: 80m
variables:
IMAGE: ubuntu2004
IMAGE: ubuntu2204
CONFIGURE_ARGS: --enable-gcov
TARGETS: aarch64-softmmu ppc64-softmmu s390x-softmmu x86_64-softmmu
MAKE_CHECK_ARGS: check
@ -548,7 +527,7 @@ build-without-defaults:
--disable-strip
TARGETS: avr-softmmu mips64-softmmu s390x-softmmu sh4-softmmu
sparc64-softmmu hexagon-linux-user i386-linux-user s390x-linux-user
MAKE_CHECK_ARGS: check-unit check-qtest-avr check-qtest-mips64
MAKE_CHECK_ARGS: check
build-libvhost-user:
extends: .base_job_template
@ -565,7 +544,9 @@ build-libvhost-user:
# No targets are built here, just tools, docs, and unit tests. This
# also feeds into the eventual documentation deployment steps later
build-tools-and-docs-debian:
extends: .native_build_job_template
extends:
- .native_build_job_template
- .native_build_artifact_template
needs:
job: amd64-debian-container
# when running on 'master' we use pre-existing container
@ -575,10 +556,6 @@ build-tools-and-docs-debian:
MAKE_CHECK_ARGS: check-unit ctags TAGS cscope
CONFIGURE_ARGS: --disable-system --disable-user --enable-docs --enable-tools
QEMU_JOB_PUBLISH: 1
artifacts:
expire_in: 2 days
paths:
- build
# Prepare for GitLab pages deployment. Anything copied into the
# "public" directory will be deployed to $USER.gitlab.io/$PROJECT

View File

@ -44,19 +44,6 @@
variables:
QEMU_JOB_CIRRUS: 1
x64-freebsd-12-build:
extends: .cirrus_build_job
variables:
NAME: freebsd-12
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
CIRRUS_VM_IMAGE_SELECTOR: image_family
CIRRUS_VM_IMAGE_NAME: freebsd-12-4
CIRRUS_VM_CPUS: 8
CIRRUS_VM_RAM: 8G
UPDATE_COMMAND: pkg update; pkg upgrade -y
INSTALL_COMMAND: pkg install -y
TEST_TARGETS: check
x64-freebsd-13-build:
extends: .cirrus_build_job
variables:

View File

@ -32,6 +32,9 @@ build_task:
- $MAKE -j$(sysctl -n hw.ncpu)
- for TARGET in $TEST_TARGETS ;
do
$MAKE -j$(sysctl -n hw.ncpu) $TARGET V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
$MAKE -j$(sysctl -n hw.ncpu) $TARGET V=1 ;
done
always:
build_result_artifacts:
path: build/meson-logs/*log.txt
type: text/plain

View File

@ -1,16 +0,0 @@
# THIS FILE WAS AUTO-GENERATED
#
# $ lcitool variables freebsd-12 qemu
#
# https://gitlab.com/libvirt/libvirt-ci
CCACHE='/usr/local/bin/ccache'
CPAN_PKGS=''
CROSS_PKGS=''
MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='pkg'
PIP3='/usr/local/bin/pip-3.8'
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio spice-protocol tesseract usbredir virglrenderer vte3 zstd'
PYPI_PKGS=''
PYTHON='/usr/local/bin/python3'

View File

@ -11,6 +11,6 @@ MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='pkg'
PIP3='/usr/local/bin/pip-3.8'
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio spice-protocol tesseract usbredir virglrenderer vte3 zstd'
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 xorriso zstd'
PYPI_PKGS=''
PYTHON='/usr/local/bin/python3'

View File

@ -11,6 +11,6 @@ MAKE='/opt/homebrew/bin/gmake'
NINJA='/opt/homebrew/bin/ninja'
PACKAGING_COMMAND='brew'
PIP3='/opt/homebrew/bin/pip3'
PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy sparse spice-protocol tesseract usbredir vde vte3 zlib zstd'
PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol tesseract usbredir vde vte3 xorriso zlib zstd'
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme'
PYTHON='/opt/homebrew/bin/python3'

View File

@ -1,22 +1,21 @@
.container_job_template:
extends: .base_job_template
image: docker:stable
image: docker:latest
stage: containers
services:
- docker:dind
before_script:
- export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:latest"
- export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/$NAME:latest"
- export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/qemu/$NAME:latest"
- apk add python3
- docker info
- docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
- until docker info; do sleep 1; done
script:
- echo "TAG:$TAG"
- echo "COMMON_TAG:$COMMON_TAG"
- ./tests/docker/docker.py --engine docker build
-t "qemu/$NAME" -f "tests/docker/dockerfiles/$NAME.docker"
-r $CI_REGISTRY/qemu-project/qemu
- docker tag "qemu/$NAME" "$TAG"
- docker build --tag "$TAG" --cache-from "$TAG" --cache-from "$COMMON_TAG"
--build-arg BUILDKIT_INLINE_CACHE=1
-f "tests/docker/dockerfiles/$NAME.docker" "."
- docker push "$TAG"
after_script:
- docker logout

View File

@ -13,10 +13,10 @@ amd64-debian-container:
variables:
NAME: debian-amd64
amd64-ubuntu2004-container:
amd64-ubuntu2204-container:
extends: .container_job_template
variables:
NAME: ubuntu2004
NAME: ubuntu2204
amd64-opensuse-leap-container:
extends: .container_job_template

View File

@ -49,3 +49,14 @@
nios2-linux-user or1k-linux-user ppc-linux-user sparc-linux-user
xtensa-linux-user $CROSS_SKIP_TARGETS"
- make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
# We can still run some tests on some of our cross build jobs. They can add this
# template to their extends to save the build logs and test results
.cross_test_artifacts:
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
expire_in: 7 days
paths:
- build/meson-logs/testlog.txt
reports:
junit: build/meson-logs/testlog.junit.xml

View File

@ -1,13 +1,6 @@
include:
- local: '/.gitlab-ci.d/crossbuild-template.yml'
cross-armel-system:
extends: .cross_system_build_job
needs:
job: armel-debian-cross-container
variables:
IMAGE: debian-armel-cross
cross-armel-user:
extends: .cross_user_build_job
needs:
@ -15,13 +8,6 @@ cross-armel-user:
variables:
IMAGE: debian-armel-cross
cross-armhf-system:
extends: .cross_system_build_job
needs:
job: armhf-debian-cross-container
variables:
IMAGE: debian-armhf-cross
cross-armhf-user:
extends: .cross_user_build_job
needs:
@ -43,16 +29,10 @@ cross-arm64-user:
variables:
IMAGE: debian-arm64-cross
cross-i386-system:
extends: .cross_system_build_job
needs:
job: i386-fedora-cross-container
variables:
IMAGE: fedora-i386-cross
MAKE_CHECK_ARGS: check-qtest
cross-i386-user:
extends: .cross_user_build_job
extends:
- .cross_user_build_job
- .cross_test_artifacts
needs:
job: i386-fedora-cross-container
variables:
@ -60,7 +40,9 @@ cross-i386-user:
MAKE_CHECK_ARGS: check
cross-i386-tci:
extends: .cross_accel_build_job
extends:
- .cross_accel_build_job
- .cross_test_artifacts
timeout: 60m
needs:
job: i386-fedora-cross-container

View File

@ -15,12 +15,15 @@ variables:
# All custom runners can extend this template to upload the testlog
# data as an artifact and also feed the junit report
.custom_artifacts_template:
.custom_runner_template:
extends: .base_job_template
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
expire_in: 7 days
when: always
paths:
- build/meson-logs/testlog.txt
- build/build.ninja
- build/meson-logs
reports:
junit: build/meson-logs/testlog.junit.xml

View File

@ -1,4 +1,9 @@
# All centos-stream-8 jobs should run successfully in an environment
# set up by the scripts/ci/setup/stream/8/build-environment.yml task
# "Installation of extra packages to build QEMU"
centos-stream-8-x86_64:
extends: .custom_runner_template
allow_failure: true
needs: []
stage: build
@ -8,15 +13,6 @@ centos-stream-8-x86_64:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$CENTOS_STREAM_8_x86_64_RUNNER_AVAILABLE"
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: on_failure
expire_in: 7 days
paths:
- build/tests/results/latest/results.xml
- build/tests/results/latest/test-results
reports:
junit: build/tests/results/latest/results.xml
before_script:
- JOBS=$(expr $(nproc) + 1)
script:
@ -25,6 +21,4 @@ centos-stream-8-x86_64:
- ../scripts/ci/org.centos/stream/8/x86_64/configure
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make -j"$JOBS"
- make NINJA=":" check
|| { cat meson-logs/testlog.txt; exit 1; } ;
- ../scripts/ci/org.centos/stream/8/x86_64/test-avocado
- make NINJA=":" check check-avocado

View File

@ -3,7 +3,7 @@
# "Install basic packages to build QEMU on Ubuntu 20.04/20.04"
ubuntu-20.04-s390x-all-linux-static:
extends: .custom_artifacts_template
extends: .custom_runner_template
needs: []
stage: build
tags:
@ -24,7 +24,7 @@ ubuntu-20.04-s390x-all-linux-static:
- make --output-sync -j`nproc` check
ubuntu-20.04-s390x-all:
extends: .custom_artifacts_template
extends: .custom_runner_template
needs: []
stage: build
tags:
@ -43,7 +43,7 @@ ubuntu-20.04-s390x-all:
- make --output-sync -j`nproc` check
ubuntu-20.04-s390x-alldbg:
extends: .custom_artifacts_template
extends: .custom_runner_template
needs: []
stage: build
tags:
@ -66,7 +66,7 @@ ubuntu-20.04-s390x-alldbg:
- make --output-sync -j`nproc` check
ubuntu-20.04-s390x-clang:
extends: .custom_artifacts_template
extends: .custom_runner_template
needs: []
stage: build
tags:
@ -108,7 +108,7 @@ ubuntu-20.04-s390x-tci:
- make --output-sync -j`nproc`
ubuntu-20.04-s390x-notcg:
extends: .custom_artifacts_template
extends: .custom_runner_template
needs: []
stage: build
tags:

View File

@ -1,9 +1,9 @@
# All ubuntu-22.04 jobs should run successfully in an environment
# set up by the scripts/ci/setup/qemu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 20.04"
# "Install basic packages to build QEMU on Ubuntu 22.04"
ubuntu-22.04-aarch32-all:
extends: .custom_artifacts_template
extends: .custom_runner_template
needs: []
stage: build
tags:

View File

@ -1,9 +1,9 @@
# All ubuntu-20.04 jobs should run successfully in an environment
# All ubuntu-22.04 jobs should run successfully in an environment
# set up by the scripts/ci/setup/qemu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 20.04"
# "Install basic packages to build QEMU on Ubuntu 22.04"
ubuntu-22.04-aarch64-all-linux-static:
extends: .custom_artifacts_template
extends: .custom_runner_template
needs: []
stage: build
tags:
@ -24,7 +24,7 @@ ubuntu-22.04-aarch64-all-linux-static:
- make --output-sync -j`nproc --ignore=40` check
ubuntu-22.04-aarch64-all:
extends: .custom_artifacts_template
extends: .custom_runner_template
needs: []
stage: build
tags:
@ -45,8 +45,30 @@ ubuntu-22.04-aarch64-all:
- make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check
ubuntu-22.04-aarch64-without-defaults:
extends: .custom_runner_template
needs: []
stage: build
tags:
- ubuntu_22.04
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
- ../configure --disable-user --without-default-devices --without-default-features
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check
ubuntu-22.04-aarch64-alldbg:
extends: .custom_artifacts_template
extends: .custom_runner_template
needs: []
stage: build
tags:
@ -65,7 +87,7 @@ ubuntu-22.04-aarch64-alldbg:
- make --output-sync -j`nproc --ignore=40` check
ubuntu-22.04-aarch64-clang:
extends: .custom_artifacts_template
extends: .custom_runner_template
needs: []
stage: build
tags:
@ -107,7 +129,7 @@ ubuntu-22.04-aarch64-tci:
- make --output-sync -j`nproc --ignore=40`
ubuntu-22.04-aarch64-notcg:
extends: .custom_artifacts_template
extends: .custom_runner_template
needs: []
stage: build
tags:
@ -123,7 +145,7 @@ ubuntu-22.04-aarch64-notcg:
script:
- mkdir build
- cd build
- ../configure --disable-tcg
- ../configure --disable-tcg --with-devices-aarch64=minimal
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check

View File

@ -1,85 +0,0 @@
# All jobs needing docker-edk2 must use the same rules it uses.
.edk2_job_rules:
rules:
# Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
- if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: never
# In forks, if QEMU_CI=1 is set, then create manual job
# if any of the files affecting the build are touched
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project"'
changes:
- .gitlab-ci.d/edk2.yml
- .gitlab-ci.d/edk2/Dockerfile
- roms/edk2/*
when: manual
# In forks, if QEMU_CI=1 is set, then create manual job
# if the branch/tag starts with 'edk2'
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_REF_NAME =~ /^edk2/'
when: manual
# In forks, if QEMU_CI=1 is set, then create manual job
# if last commit msg contains 'EDK2' (case insensitive)
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /edk2/i'
when: manual
# Run if any files affecting the build output are touched
- changes:
- .gitlab-ci.d/edk2.yml
- .gitlab-ci.d/edk2/Dockerfile
- roms/edk2/*
when: on_success
# Run if the branch/tag starts with 'edk2'
- if: '$CI_COMMIT_REF_NAME =~ /^edk2/'
when: on_success
# Run if last commit msg contains 'EDK2' (case insensitive)
- if: '$CI_COMMIT_MESSAGE =~ /edk2/i'
when: on_success
docker-edk2:
extends: .edk2_job_rules
stage: containers
image: docker:19.03.1
services:
- docker:19.03.1-dind
variables:
GIT_DEPTH: 3
IMAGE_TAG: $CI_REGISTRY_IMAGE:edk2-cross-build
# We don't use TLS
DOCKER_HOST: tcp://docker:2375
DOCKER_TLS_CERTDIR: ""
before_script:
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
script:
- docker pull $IMAGE_TAG || true
- docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
--tag $IMAGE_TAG .gitlab-ci.d/edk2
- docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
- docker push $IMAGE_TAG
build-edk2:
extends: .edk2_job_rules
stage: build
needs: ['docker-edk2']
artifacts:
paths: # 'artifacts.zip' will contain the following files:
- pc-bios/edk2*bz2
- pc-bios/edk2-licenses.txt
- edk2-stdout.log
- edk2-stderr.log
image: $CI_REGISTRY_IMAGE:edk2-cross-build
variables:
GIT_DEPTH: 3
script: # Clone the required submodules and build EDK2
- git submodule update --init roms/edk2
- git -C roms/edk2 submodule update --init --
ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
BaseTools/Source/C/BrotliCompress/brotli
CryptoPkg/Library/OpensslLib/openssl
MdeModulePkg/Library/BrotliCustomDecompressLib/brotli
- export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1))
- echo "=== Using ${JOBS} simultaneous jobs ==="
- make -j${JOBS} -C roms efi 2>&1 1>edk2-stdout.log | tee -a edk2-stderr.log >&2

View File

@ -1,27 +0,0 @@
#
# Docker image to cross-compile EDK2 firmware binaries
#
FROM ubuntu:18.04
MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
# Install packages required to build EDK2
RUN apt update \
&& \
\
DEBIAN_FRONTEND=noninteractive \
apt install --assume-yes --no-install-recommends \
build-essential \
ca-certificates \
dos2unix \
gcc-aarch64-linux-gnu \
gcc-arm-linux-gnueabi \
git \
iasl \
make \
nasm \
python3 \
uuid-dev \
&& \
\
rm -rf /var/lib/apt/lists/*

View File

@ -42,17 +42,15 @@
docker-opensbi:
extends: .opensbi_job_rules
stage: containers
image: docker:19.03.1
image: docker:latest
services:
- docker:19.03.1-dind
- docker:dind
variables:
GIT_DEPTH: 3
IMAGE_TAG: $CI_REGISTRY_IMAGE:opensbi-cross-build
# We don't use TLS
DOCKER_HOST: tcp://docker:2375
DOCKER_TLS_CERTDIR: ""
before_script:
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- until docker info; do sleep 1; done
script:
- docker pull $IMAGE_TAG || true
- docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA

View File

@ -15,6 +15,7 @@ RUN apt update \
ca-certificates \
git \
make \
python3 \
wget \
&& \
\

View File

@ -1,10 +1,16 @@
# This file contains the set of jobs run by the QEMU project:
# https://gitlab.com/qemu-project/qemu/-/pipelines
variables:
RUNNER_TAG: ""
default:
tags:
- $RUNNER_TAG
include:
- local: '/.gitlab-ci.d/base.yml'
- local: '/.gitlab-ci.d/stages.yml'
- local: '/.gitlab-ci.d/edk2.yml'
- local: '/.gitlab-ci.d/opensbi.yml'
- local: '/.gitlab-ci.d/containers.yml'
- local: '/.gitlab-ci.d/crossbuilds.yml'

View File

@ -59,6 +59,7 @@ msys2-64bit:
mingw-w64-x86_64-SDL2
mingw-w64-x86_64-SDL2_image
mingw-w64-x86_64-snappy
mingw-w64-x86_64-spice
mingw-w64-x86_64-usbredir
mingw-w64-x86_64-zstd "
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
@ -108,6 +109,7 @@ msys2-32bit:
mingw-w64-i686-SDL2
mingw-w64-i686-SDL2_image
mingw-w64-i686-snappy
mingw-w64-i686-spice
mingw-w64-i686-usbredir
mingw-w64-i686-zstd "
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory

.gitmodules vendored
View File

@ -13,8 +13,8 @@
[submodule "roms/qemu-palcode"]
path = roms/qemu-palcode
url = https://gitlab.com/qemu-project/qemu-palcode.git
[submodule "dtc"]
path = dtc
[submodule "subprojects/dtc"]
path = subprojects/dtc
url = https://gitlab.com/qemu-project/dtc.git
[submodule "roms/u-boot"]
path = roms/u-boot
@ -25,8 +25,8 @@
[submodule "roms/QemuMacDrivers"]
path = roms/QemuMacDrivers
url = https://gitlab.com/qemu-project/QemuMacDrivers.git
[submodule "ui/keycodemapdb"]
path = ui/keycodemapdb
[submodule "subprojects/keycodemapdb"]
path = subprojects/keycodemapdb
url = https://gitlab.com/qemu-project/keycodemapdb.git
[submodule "roms/seabios-hppa"]
path = roms/seabios-hppa
@ -49,9 +49,6 @@
[submodule "roms/qboot"]
path = roms/qboot
url = https://gitlab.com/qemu-project/qboot.git
[submodule "meson"]
path = meson
url = https://gitlab.com/qemu-project/meson.git
[submodule "roms/vbootrom"]
path = roms/vbootrom
url = https://gitlab.com/qemu-project/vbootrom.git

View File

@ -54,8 +54,10 @@ Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <amarkovic@wavecomp.com>
Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <arikalo@wavecomp.com>
Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <aleksandar.rikalo@rt-rk.com>
Alexander Graf <agraf@csgraf.de> <agraf@suse.de>
Ani Sinha <anisinha@redhat.com> <ani@anisinha.ca>
Anthony Liguori <anthony@codemonkey.ws> Anthony Liguori <aliguori@us.ibm.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
Damien Hedde <damien.hedde@dahe.fr> <damien.hedde@greensocs.com>
Filip Bozuta <filip.bozuta@syrmia.com> <filip.bozuta@rt-rk.com.com>
Frederic Konrad <konrad.frederic@yahoo.fr> <fred.konrad@greensocs.com>
Frederic Konrad <konrad.frederic@yahoo.fr> <konrad@adacore.com>
@ -76,6 +78,7 @@ Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@redhat.com>
Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@fungible.com>
Stefan Brankovic <stefan.brankovic@syrmia.com> <stefan.brankovic@rt-rk.com.com>
Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com>
Taylor Simpson <ltaylorsimpson@gmail.com> <tsimpson@quicinc.com>
# Also list preferred name forms where people have changed their
# git author config, or had utf8/latin1 encoding issues.

View File

@ -237,13 +237,15 @@ jobs:
- libglib2.0-dev
- libgnutls28-dev
- ninja-build
- flex
- bison
env:
- CONFIG="--disable-containers --disable-system"
- name: "[s390x] Clang (disable-tcg)"
arch: s390x
dist: focal
compiler: clang
compiler: clang-10
addons:
apt_packages:
- libaio-dev
@ -269,6 +271,7 @@ jobs:
- libvdeplug-dev
- libvte-2.91-dev
- ninja-build
- clang-10
env:
- TEST_CMD="make check-unit"
- CONFIG="--disable-containers --disable-tcg --enable-kvm --disable-tools

View File

@ -64,6 +64,21 @@ L: qemu-devel@nongnu.org
F: *
F: */
Project policy and developer guides
R: Alex Bennée <alex.bennee@linaro.org>
R: Daniel P. Berrangé <berrange@redhat.com>
R: Thomas Huth <thuth@redhat.com>
R: Markus Armbruster <armbru@redhat.com>
R: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Juan Quintela <quintela@redhat.com>
W: https://www.qemu.org/docs/master/devel/index.html
S: Odd Fixes
F: docs/devel/style.rst
F: docs/devel/code-of-conduct.rst
F: docs/devel/conflict-resolution.rst
F: docs/devel/submitting-a-patch.rst
F: docs/devel/submitting-a-pull-request.rst
Responsible Disclosure, Reporting Security Issues
-------------------------------------------------
W: https://wiki.qemu.org/SecurityProcess
@ -123,6 +138,7 @@ M: Richard Henderson <richard.henderson@linaro.org>
R: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: softmmu/cpus.c
F: softmmu/watchpoint.c
F: cpus-common.c
F: page-vary.c
F: page-vary-common.c
@ -135,10 +151,15 @@ F: docs/devel/decodetree.rst
F: docs/devel/tcg*
F: include/exec/cpu*.h
F: include/exec/exec-all.h
F: include/exec/tb-flush.h
F: include/exec/target_long.h
F: include/exec/helper*.h
F: include/sysemu/cpus.h
F: include/sysemu/tcg.h
F: include/hw/core/tcg-cpu-ops.h
F: host/include/*/host/cpuinfo.h
F: util/cpuinfo-*.c
F: include/tcg/
FPU emulation
M: Aurelien Jarno <aurelien@aurel32.net>
@ -197,7 +218,7 @@ F: tests/tcg/cris/
F: disas/cris.c
Hexagon TCG CPUs
M: Taylor Simpson <tsimpson@quicinc.com>
M: Brian Cain <bcain@quicinc.com>
S: Supported
F: target/hexagon/
X: target/hexagon/idef-parser/
@ -207,6 +228,7 @@ F: tests/tcg/hexagon/
F: disas/hexagon.c
F: configs/targets/hexagon-linux-user/default.mak
F: docker/dockerfiles/debian-hexagon-cross.docker
F: gdb-xml/hexagon*.xml
Hexagon idef-parser
M: Alessandro Di Federico <ale@rev.ng>
@ -227,6 +249,7 @@ M: Xiaojuan Yang <yangxiaojuan@loongson.cn>
S: Maintained
F: target/loongarch/
F: tests/tcg/loongarch64/
F: tests/avocado/machine_loongarch.py
M68K TCG CPUs
M: Laurent Vivier <laurent@vivier.eu>
@ -254,9 +277,9 @@ F: docs/system/cpu-models-mips.rst.inc
F: tests/tcg/mips/
NiosII TCG CPUs
M: Chris Wulff <crwulff@gmail.com>
M: Marek Vasut <marex@denx.de>
S: Maintained
R: Chris Wulff <crwulff@gmail.com>
R: Marek Vasut <marex@denx.de>
S: Orphan
F: target/nios2/
F: hw/nios2/
F: disas/nios2.c
@ -310,7 +333,7 @@ F: target/riscv/xthead*.decode
RISC-V XVentanaCondOps extension
M: Philipp Tomsich <philipp.tomsich@vrull.eu>
L: qemu-riscv@nongnu.org
S: Supported
S: Maintained
F: target/riscv/XVentanaCondOps.decode
F: target/riscv/insn_trans/trans_xventanacondops.c.inc
@ -368,6 +391,7 @@ S: Maintained
F: target/xtensa/
F: hw/xtensa/
F: tests/tcg/xtensa/
F: tests/tcg/xtensaeb/
F: disas/xtensa.c
F: include/hw/xtensa/xtensa-isa.h
F: configs/devices/xtensa*/default.mak
@ -442,6 +466,15 @@ F: target/i386/kvm/
F: target/i386/sev*
F: scripts/kvm/vmxcap
Xen emulation on X86 KVM CPUs
M: David Woodhouse <dwmw2@infradead.org>
M: Paul Durrant <paul@xen.org>
S: Supported
F: include/sysemu/kvm_xen.h
F: target/i386/kvm/xen*
F: hw/i386/kvm/xen*
F: tests/avocado/xen_guest.py
Guest CPU Cores (other accelerators)
------------------------------------
Overall
@ -911,10 +944,12 @@ SBSA-REF
M: Radoslaw Biernacki <rad@semihalf.com>
M: Peter Maydell <peter.maydell@linaro.org>
R: Leif Lindholm <quic_llindhol@quicinc.com>
R: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/sbsa-ref.c
F: docs/system/arm/sbsa.rst
F: tests/avocado/machine_aarch64_sbsaref.py
Sharp SL-5500 (Collie) PDA
M: Peter Maydell <peter.maydell@linaro.org>
@ -998,12 +1033,6 @@ S: Maintained
F: hw/ssi/xlnx-versal-ospi.c
F: include/hw/ssi/xlnx-versal-ospi.h
ARM ACPI Subsystem
M: Shannon Zhao <shannon.zhaosl@gmail.com>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/virt-acpi-build.c
STM32F100
M: Alexandre Iooss <erdnaxe@crans.org>
L: qemu-arm@nongnu.org
@ -1090,7 +1119,7 @@ F: include/hw/misc/pca9552*.h
F: hw/net/ftgmac100.c
F: include/hw/net/ftgmac100.h
F: docs/system/arm/aspeed.rst
F: tests/qtest/*aspeed*
F: tests/*/*aspeed*
F: hw/arm/fby35.c
NRF51
@ -1399,6 +1428,7 @@ M: Daniel Henrique Barboza <danielhb413@gmail.com>
R: Cédric Le Goater <clg@kaod.org>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
R: Harsh Prateek Bora <harshpb@linux.ibm.com>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/*/spapr*
@ -1679,6 +1709,7 @@ F: hw/i2c/smbus_ich9.c
F: hw/acpi/piix4.c
F: hw/acpi/ich9*.c
F: include/hw/acpi/ich9*.h
F: include/hw/southbridge/ich9.h
F: include/hw/southbridge/piix.h
F: hw/isa/apm.c
F: include/hw/isa/apm.h
@ -1711,6 +1742,7 @@ F: include/hw/char/parallel.h
F: include/hw/dma/i8257.h
F: include/hw/i2c/pm_smbus.h
F: include/hw/input/i8042.h
F: include/hw/intc/ioapic*
F: include/hw/isa/i8259_internal.h
F: include/hw/isa/superio.h
F: include/hw/timer/hpet.h
@ -1795,7 +1827,7 @@ F: hw/misc/edu.c
IDE
M: John Snow <jsnow@redhat.com>
L: qemu-block@nongnu.org
S: Supported
S: Odd Fixes
F: include/hw/ide.h
F: include/hw/ide/
F: hw/ide/
@ -1820,7 +1852,7 @@ T: git https://github.com/cminyard/qemu.git master-ipmi-rebase
Floppy
M: John Snow <jsnow@redhat.com>
L: qemu-block@nongnu.org
S: Supported
S: Odd Fixes
F: hw/block/fdc.c
F: hw/block/fdc-internal.h
F: hw/block/fdc-isa.c
@ -1871,7 +1903,7 @@ F: hw/pci/pcie_doe.c
ACPI/SMBIOS
M: Michael S. Tsirkin <mst@redhat.com>
M: Igor Mammedov <imammedo@redhat.com>
R: Ani Sinha <ani@anisinha.ca>
R: Ani Sinha <anisinha@redhat.com>
S: Supported
F: include/hw/acpi/*
F: include/hw/firmware/smbios.h
@ -1889,6 +1921,18 @@ F: docs/specs/acpi_nvdimm.rst
F: docs/specs/acpi_pci_hotplug.rst
F: docs/specs/acpi_hw_reduced_hotplug.rst
ARM ACPI Subsystem
M: Shannon Zhao <shannon.zhaosl@gmail.com>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/virt-acpi-build.c
RISC-V ACPI Subsystem
M: Sunil V L <sunilvl@ventanamicro.com>
L: qemu-riscv@nongnu.org
S: Maintained
F: hw/riscv/virt-acpi-build.c
ACPI/VIOT
M: Jean-Philippe Brucker <jean-philippe@linaro.org>
S: Supported
@ -1896,7 +1940,7 @@ F: hw/acpi/viot.c
F: hw/acpi/viot.h
ACPI/AVOCADO/BIOSBITS
M: Ani Sinha <ani@anisinha.ca>
M: Ani Sinha <anisinha@redhat.com>
M: Michael S. Tsirkin <mst@redhat.com>
S: Supported
F: tests/avocado/acpi-bits/*
@ -2040,6 +2084,10 @@ F: backends/vhost-user.c
F: include/sysemu/vhost-user-backend.h
F: subprojects/libvhost-user/
vhost-shadow-virtqueue
R: Eugenio Pérez <eperezma@redhat.com>
F: hw/virtio/vhost-shadow-virtqueue.*
virtio
M: Michael S. Tsirkin <mst@redhat.com>
S: Supported
@ -2098,7 +2146,6 @@ T: git https://github.com/borntraeger/qemu.git s390-next
L: qemu-s390x@nongnu.org
virtiofs
M: Dr. David Alan Gilbert <dgilbert@redhat.com>
M: Stefan Hajnoczi <stefanha@redhat.com>
S: Supported
F: hw/virtio/vhost-user-fs*
@ -2193,6 +2240,7 @@ F: tests/qtest/fuzz-megasas-test.c
Network packet abstractions
M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
R: Akihiko Odaki <akihiko.odaki@daynix.com>
S: Maintained
F: include/net/eth.h
F: net/eth.c
@ -2216,14 +2264,28 @@ F: docs/specs/rocker.txt
e1000x
M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
R: Akihiko Odaki <akihiko.odaki@daynix.com>
S: Maintained
F: hw/net/e1000x*
e1000e
M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
R: Akihiko Odaki <akihiko.odaki@daynix.com>
S: Maintained
F: hw/net/e1000e*
F: tests/qtest/fuzz-e1000e-test.c
F: tests/qtest/e1000e-test.c
F: tests/qtest/libqos/e1000e.*
igb
M: Akihiko Odaki <akihiko.odaki@daynix.com>
R: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
S: Maintained
F: docs/system/devices/igb.rst
F: hw/net/igb*
F: tests/avocado/netdev-ethtool.py
F: tests/qtest/igb-test.c
F: tests/qtest/libqos/igb.c
eepro100
M: Stefan Weil <sw@weilnetz.de>
@ -2487,6 +2549,7 @@ Subsystems
----------
Overall Audio backends
M: Gerd Hoffmann <kraxel@redhat.com>
M: Marc-André Lureau <marcandre.lureau@redhat.com>
S: Odd Fixes
F: audio/
X: audio/alsaaudio.c
@ -2510,7 +2573,7 @@ Core Audio framework backend
M: Gerd Hoffmann <kraxel@redhat.com>
M: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Christian Schoenebeck <qemu_oss@crudebyte.com>
R: Akihiko Odaki <akihiko.odaki@gmail.com>
R: Akihiko Odaki <akihiko.odaki@daynix.com>
S: Odd Fixes
F: audio/coreaudio.c
@ -2632,8 +2695,8 @@ T: git https://gitlab.com/jsnow/qemu.git jobs
T: git https://gitlab.com/vsementsov/qemu.git block
Compute Express Link
M: Ben Widawsky <ben.widawsky@intel.com>
M: Jonathan Cameron <jonathan.cameron@huawei.com>
R: Fan Ni <fan.ni@samsung.com>
S: Supported
F: hw/cxl/
F: hw/mem/cxl_type3.c
@ -2731,9 +2794,11 @@ S: Maintained
F: docs/system/gdb.rst
F: gdbstub/*
F: include/exec/gdbstub.h
F: include/gdbstub/*
F: gdb-xml/
F: tests/tcg/multiarch/gdbstub/
F: scripts/feature_to_c.sh
F: scripts/probe-gdb-support.py
Memory API
M: Paolo Bonzini <pbonzini@redhat.com>
@ -2781,6 +2846,7 @@ F: docs/spice-port-fqdn.txt
Graphics
M: Gerd Hoffmann <kraxel@redhat.com>
M: Marc-André Lureau <marcandre.lureau@redhat.com>
S: Odd Fixes
F: ui/
F: include/ui/
@ -2791,7 +2857,7 @@ F: docs/devel/ui.rst
Cocoa graphics
M: Peter Maydell <peter.maydell@linaro.org>
M: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Akihiko Odaki <akihiko.odaki@gmail.com>
R: Akihiko Odaki <akihiko.odaki@daynix.com>
S: Odd Fixes
F: ui/cocoa.m
@ -2816,13 +2882,15 @@ F: qapi/run-state.json
Read, Copy, Update (RCU)
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: docs/devel/lockcnt.txt
F: docs/devel/rcu.txt
F: include/qemu/rcu*.h
F: tests/unit/rcutorture.c
F: tests/unit/test-rcu-*.c
F: util/rcu.c
Human Monitor (HMP)
M: Dr. David Alan Gilbert <dgilbert@redhat.com>
M: Dr. David Alan Gilbert <dave@treblig.org>
S: Maintained
F: monitor/monitor-internal.h
F: monitor/misc.c
@ -2862,9 +2930,11 @@ T: git https://gitlab.com/ehabkost/qemu.git machine-next
Cryptodev Backends
M: Gonglei <arei.gonglei@huawei.com>
M: zhenwei pi <pizhenwei@bytedance.com>
S: Maintained
F: include/sysemu/cryptodev*.h
F: backends/cryptodev*.c
F: qapi/cryptodev.json
Python library
M: John Snow <jsnow@redhat.com>
@ -3093,7 +3163,8 @@ F: scripts/checkpatch.pl
Migration
M: Juan Quintela <quintela@redhat.com>
M: Dr. David Alan Gilbert <dgilbert@redhat.com>
R: Peter Xu <peterx@redhat.com>
R: Leonardo Bras <leobras@redhat.com>
S: Maintained
F: hw/core/vmstate-if.c
F: include/hw/vmstate-if.h
@ -3237,6 +3308,7 @@ S: Supported
F: replay/*
F: block/blkreplay.c
F: net/filter-replay.c
F: include/exec/replay-core.h
F: include/sysemu/replay.h
F: docs/devel/replay.rst
F: docs/system/replay.rst
@ -3301,8 +3373,6 @@ F: roms/edk2
F: roms/edk2-*
F: tests/data/uefi-boot-images/
F: tests/uefi-test-tools/
F: .gitlab-ci.d/edk2.yml
F: .gitlab-ci.d/edk2/
VT-d Emulation
M: Michael S. Tsirkin <mst@redhat.com>
@ -3313,6 +3383,10 @@ F: hw/i386/intel_iommu.c
F: hw/i386/intel_iommu_internal.h
F: include/hw/i386/intel_iommu.h
AMD-Vi Emulation
S: Orphan
F: hw/i386/amd_iommu.?
OpenSBI Firmware
M: Bin Meng <bmeng.cn@gmail.com>
S: Supported
@ -3322,7 +3396,7 @@ F: .gitlab-ci.d/opensbi/
Clock framework
M: Luc Michel <luc@lmichel.fr>
R: Damien Hedde <damien.hedde@greensocs.com>
R: Damien Hedde <damien.hedde@dahe.fr>
S: Maintained
F: include/hw/clock.h
F: include/hw/qdev-clock.h
@ -3760,6 +3834,7 @@ F: scripts/ci/
F: tests/docker/
F: tests/vm/
F: tests/lcitool/
F: tests/avocado/tuxrun_baselines.py
F: scripts/archive-source.sh
F: docs/devel/testing.rst
W: https://gitlab.com/qemu-project/qemu/pipelines
@ -3776,8 +3851,7 @@ W: https://cirrus-ci.com/github/qemu/qemu
Windows Hosted Continuous Integration
M: Yonggang Luo <luoyonggang@gmail.com>
S: Maintained
F: .cirrus.yml
W: https://cirrus-ci.com/github/qemu/qemu
F: .gitlab-ci.d/windows.yml
Guest Test Compilation Support
M: Alex Bennée <alex.bennee@linaro.org>
@ -3843,6 +3917,16 @@ F: configure
F: scripts/mtest2make.py
F: tests/Makefile.include
Kconfig
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: scripts/minikconf.py
F: docs/devel/kconfig.rst
F: Kconfig*
F: */Kconfig*
F: hw/*/Kconfig*
F: target/*/Kconfig*
GIT submodules
M: Daniel P. Berrange <berrange@redhat.com>
S: Odd Fixes
@ -3866,3 +3950,8 @@ Performance Tools and Tests
M: Ahmed Karaman <ahmedkhaledkaraman@gmail.com>
S: Maintained
F: scripts/performance/
Code Coverage Tools
M: Alex Bennée <alex.bennee@linaro.org>
S: Odd Fixes
F: scripts/coverage/

View File

@ -26,7 +26,7 @@ quiet-command-run = $(if $(V),,$(if $2,printf " %-7s %s\n" $2 $3 && ))$1
quiet-@ = $(if $(V),,@)
quiet-command = $(quiet-@)$(call quiet-command-run,$1,$2,$3)
UNCHECKED_GOALS := %clean TAGS cscope ctags dist \
UNCHECKED_GOALS := TAGS gtags cscope ctags dist \
help check-help print-% \
docker docker-% vm-help vm-test vm-build-%
@ -115,15 +115,15 @@ Makefile.ninja: build.ninja
$(NINJA) -t query build.ninja | sed -n '1,/^ input:/d; /^ outputs:/q; s/$$/ \\/p'; \
} > $@.tmp && mv $@.tmp $@
-include Makefile.ninja
endif
ifneq ($(MESON),)
# A separate rule is needed for Makefile dependencies to avoid -n
build.ninja: build.ninja.stamp
$(build-files):
build.ninja.stamp: meson.stamp $(build-files)
$(NINJA) $(if $V,-v,) build.ninja && touch $@
endif
$(MESON) setup --reconfigure $(SRC_PATH) && touch $@
ifneq ($(MESON),)
Makefile.mtest: build.ninja scripts/mtest2make.py
$(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@
-include Makefile.mtest
@ -176,10 +176,8 @@ plugins:
endif # $(CONFIG_PLUGIN)
else # config-host.mak does not exist
config-host.mak:
ifneq ($(filter-out $(UNCHECKED_GOALS),$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail))
@echo "Please call configure before running make!"
@exit 1
$(error Please call configure before running make)
endif
endif # config-host.mak does not exist
@ -220,7 +218,7 @@ qemu-%.tar.bz2:
distclean: clean recurse-distclean
-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean -g || :
rm -f config-host.mak Makefile.prereqs qemu-bundle
rm -f config-host.mak Makefile.prereqs
rm -f tests/tcg/*/config-target.mak tests/tcg/config-host.mak
rm -f config.status
rm -f roms/seabios/config.mak
@ -230,7 +228,7 @@ distclean: clean recurse-distclean
rm -f Makefile.ninja Makefile.mtest build.ninja.stamp meson.stamp
rm -f config.log
rm -f linux-headers/asm
rm -Rf .sdk
rm -Rf .sdk qemu-bundle
find-src-path = find "$(SRC_PATH)" -path "$(SRC_PATH)/meson" -prune -o \
-type l -prune -o \( -name "*.[chsS]" -o -name "*.[ch].inc" \)

View File

@ -1 +1 @@
7.2.50
8.0.50

View File

@ -27,7 +27,7 @@
#include "qemu/accel.h"
#include "hw/boards.h"
#include "sysemu/cpus.h"
#include "qemu/error-report.h"
#include "accel-softmmu.h"
int accel_init_machine(AccelState *accel, MachineState *ms)

View File

@ -86,6 +86,13 @@ static bool kvm_cpus_are_resettable(void)
return !kvm_enabled() || kvm_cpu_check_are_resettable();
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
static int kvm_update_guest_debug_ops(CPUState *cpu)
{
return kvm_update_guest_debug(cpu, 0);
}
#endif
static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
@ -99,6 +106,7 @@ static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;
#ifdef KVM_CAP_SET_GUEST_DEBUG
ops->update_guest_debug = kvm_update_guest_debug_ops;
ops->supports_guest_debug = kvm_supports_guest_debug;
ops->insert_breakpoint = kvm_insert_breakpoint;
ops->remove_breakpoint = kvm_remove_breakpoint;
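
The kvm_update_guest_debug_ops() wrapper added above exists only to adapt the two-argument kvm_update_guest_debug() call to the one-argument hook slot in the accel ops vtable. A minimal sketch of that vtable-adapter pattern, using illustrative stand-in types rather than QEMU's real AccelOpsClass:

#include <stdio.h>

/* Illustrative stand-ins; the real AccelOpsClass carries many more hooks. */
typedef struct CPUState CPUState;
typedef struct {
    int (*update_guest_debug)(CPUState *cpu);
} AccelOps;

/* Stub for the two-argument KVM entry point. */
static int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    (void)cpu; (void)reinject_trap;
    return 0;
}

/* The wrapper exists purely to match the hook's one-argument signature. */
static int kvm_update_guest_debug_ops(CPUState *cpu)
{
    return kvm_update_guest_debug(cpu, 0);
}

int main(void)
{
    AccelOps ops = { .update_guest_debug = kvm_update_guest_debug_ops };
    printf("update_guest_debug -> %d\n", ops.update_guest_debug(NULL));
    return 0;
}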

View File

@ -685,6 +685,15 @@ static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
uint32_t ring_size = s->kvm_dirty_ring_size;
uint32_t count = 0, fetch = cpu->kvm_fetch_index;
/*
* It's possible that we race with the vcpu creation code, where the vcpu
* has been put onto the vcpus list but has not yet initialized its dirty
* ring structures. If so, skip it.
*/
if (!cpu->created) {
return 0;
}
assert(dirty_gfns && ring_size);
trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);
@ -1352,6 +1361,10 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
*/
if (kvm_state->kvm_dirty_ring_size) {
kvm_dirty_ring_reap_locked(kvm_state, NULL);
if (kvm_state->kvm_dirty_ring_with_bitmap) {
kvm_slot_sync_dirty_pages(mem);
kvm_slot_get_dirty_log(kvm_state, mem);
}
} else {
kvm_slot_get_dirty_log(kvm_state, mem);
}
@ -1449,6 +1462,69 @@ static int kvm_dirty_ring_reaper_init(KVMState *s)
return 0;
}
static int kvm_dirty_ring_init(KVMState *s)
{
uint32_t ring_size = s->kvm_dirty_ring_size;
uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
int ret;
s->kvm_dirty_ring_size = 0;
s->kvm_dirty_ring_bytes = 0;
/* Bail if the dirty ring size isn't specified */
if (!ring_size) {
return 0;
}
/*
* Read the max supported pages. Fall back to dirty logging mode
* if the dirty ring isn't supported.
*/
ret = kvm_vm_check_extension(s, capability);
if (ret <= 0) {
capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
ret = kvm_vm_check_extension(s, capability);
}
if (ret <= 0) {
warn_report("KVM dirty ring not available, using bitmap method");
return 0;
}
if (ring_bytes > ret) {
error_report("KVM dirty ring size %" PRIu32 " too big "
"(maximum is %ld). Please use a smaller value.",
ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
return -EINVAL;
}
ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
if (ret) {
error_report("Enabling of KVM dirty ring failed: %s. "
"Suggested minimum value is 1024.", strerror(-ret));
return -EIO;
}
/* Enable the backup bitmap if it is supported */
ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
if (ret > 0) {
ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
if (ret) {
error_report("Enabling of KVM dirty ring's backup bitmap failed: "
"%s. ", strerror(-ret));
return -EIO;
}
s->kvm_dirty_ring_with_bitmap = true;
}
s->kvm_dirty_ring_size = ring_size;
s->kvm_dirty_ring_bytes = ring_bytes;
return 0;
}
static void kvm_region_add(MemoryListener *listener,
MemoryRegionSection *section)
{
@ -1554,7 +1630,7 @@ static void kvm_log_sync(MemoryListener *listener,
kvm_slots_unlock();
}
static void kvm_log_sync_global(MemoryListener *l)
static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
{
KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
KVMState *s = kvm_state;
@ -1573,6 +1649,12 @@ static void kvm_log_sync_global(MemoryListener *l)
mem = &kml->slots[i];
if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
kvm_slot_sync_dirty_pages(mem);
if (s->kvm_dirty_ring_with_bitmap && last_stage &&
kvm_slot_get_dirty_log(s, mem)) {
kvm_slot_sync_dirty_pages(mem);
}
/*
* This is not needed by KVM_GET_DIRTY_LOG because the
* ioctl will unconditionally overwrite the whole region.
@ -2361,13 +2443,13 @@ static int kvm_init(MachineState *ms)
static const char upgrade_note[] =
"Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
"(see http://sourceforge.net/projects/kvm).\n";
struct {
const struct {
const char *name;
int num;
} num_cpus[] = {
{ "SMP", ms->smp.cpus },
{ "hotpluggable", ms->smp.max_cpus },
{ NULL, }
{ /* end of list */ }
}, *nc = num_cpus;
int soft_vcpus_limit, hard_vcpus_limit;
KVMState *s;
@ -2512,37 +2594,11 @@ static int kvm_init(MachineState *ms)
* Enable KVM dirty ring if supported, otherwise fall back to
* dirty logging mode
*/
if (s->kvm_dirty_ring_size > 0) {
uint64_t ring_bytes;
ring_bytes = s->kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn);
/* Read the max supported pages */
ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING);
if (ret > 0) {
if (ring_bytes > ret) {
error_report("KVM dirty ring size %" PRIu32 " too big "
"(maximum is %ld). Please use a smaller value.",
s->kvm_dirty_ring_size,
(long)ret / sizeof(struct kvm_dirty_gfn));
ret = -EINVAL;
ret = kvm_dirty_ring_init(s);
if (ret < 0) {
goto err;
}
ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_bytes);
if (ret) {
error_report("Enabling of KVM dirty ring failed: %s. "
"Suggested minimum value is 1024.", strerror(-ret));
goto err;
}
s->kvm_dirty_ring_bytes = ring_bytes;
} else {
warn_report("KVM dirty ring not available, using bitmap method");
s->kvm_dirty_ring_size = 0;
}
}
/*
* KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is
* enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no
@ -3305,7 +3361,7 @@ bool kvm_supports_guest_debug(void)
return kvm_has_guest_debug;
}
int kvm_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
struct kvm_sw_breakpoint *bp;
int err;
@ -3343,7 +3399,7 @@ int kvm_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
return 0;
}
int kvm_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
struct kvm_sw_breakpoint *bp;
int err;
@ -3701,8 +3757,12 @@ static void kvm_accel_instance_init(Object *obj)
s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
/* KVM dirty ring is by default off */
s->kvm_dirty_ring_size = 0;
s->kvm_dirty_ring_with_bitmap = false;
s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
s->notify_window = 0;
s->xen_version = 0;
s->xen_gnttab_max_frames = 64;
s->xen_evtchn_max_pirq = 256;
}
/**
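
The hunks above pull the dirty-ring setup out of kvm_init() into the new kvm_dirty_ring_init(), which also probes the acquire/release ring variant and the optional backup bitmap. Below is a minimal sketch of the probe-and-fallback shape, with stubbed capability checks standing in for kvm_vm_check_extension() and kvm_vm_enable_cap():

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical capability IDs standing in for the KVM_CAP_* constants. */
enum { CAP_DIRTY_LOG_RING, CAP_DIRTY_LOG_RING_ACQ_REL };

/* Stub for kvm_vm_check_extension(): maximum ring size in bytes supported
 * for the given capability, or 0 when unsupported. */
static long check_extension(int cap)
{
    return cap == CAP_DIRTY_LOG_RING_ACQ_REL ? 65536 : 0;
}

/* Mirrors the shape of kvm_dirty_ring_init(): try the base capability,
 * retry with the acquire/release variant, fall back to the bitmap method,
 * and reject rings larger than the kernel allows. */
static int dirty_ring_init(uint32_t ring_size, size_t gfn_size)
{
    uint64_t ring_bytes = (uint64_t)ring_size * gfn_size;
    int cap = CAP_DIRTY_LOG_RING;
    long max = check_extension(cap);

    if (max <= 0) {
        cap = CAP_DIRTY_LOG_RING_ACQ_REL;
        max = check_extension(cap);
    }
    if (max <= 0) {
        fprintf(stderr, "dirty ring unavailable, using bitmap method\n");
        return 0;                      /* graceful fallback, not an error */
    }
    if (ring_bytes > (uint64_t)max) {
        return -EINVAL;                /* ring too big for this kernel */
    }
    printf("dirty ring enabled via cap %d (%llu bytes)\n",
           cap, (unsigned long long)ring_bytes);
    return 0;
}

int main(void)
{
    return dirty_ring_init(4096, 8) < 0;
}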

View File

@ -19,8 +19,8 @@ void kvm_cpu_synchronize_post_reset(CPUState *cpu);
void kvm_cpu_synchronize_post_init(CPUState *cpu);
void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu);
bool kvm_supports_guest_debug(void);
int kvm_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len);
int kvm_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len);
int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
void kvm_remove_all_breakpoints(CPUState *cpu);
#endif /* KVM_CPUS_H */

View File

@ -11,6 +11,7 @@
*/
#include "qemu/osdep.h"
#include "exec/tb-flush.h"
#include "exec/exec-all.h"
void tb_flush(CPUState *cpu)
@ -25,7 +26,7 @@ void tcg_flush_jmp_cache(CPUState *cpu)
{
}
int probe_access_flags(CPUArchState *env, target_ulong addr,
int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr)
{

View File

@ -13,26 +13,12 @@
* See the COPYING file in the top-level directory.
*/
static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr,
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
}
#if HAVE_ATOMIC128
static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
}
static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
#endif
/*
* Atomic helpers callable from TCG.
* These have a common interface and all defer to cpu_atomic_*
@ -40,7 +26,7 @@ static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
*/
#define CMPXCHG_HELPER(OP, TYPE) \
TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr, \
TYPE HELPER(atomic_##OP)(CPUArchState *env, uint64_t addr, \
TYPE oldv, TYPE newv, uint32_t oi) \
{ return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); }
@ -62,36 +48,16 @@ CMPXCHG_HELPER(cmpxchgo_le, Int128)
#undef CMPXCHG_HELPER
Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, target_ulong addr,
Int128 HELPER(nonatomic_cmpxchgo)(CPUArchState *env, uint64_t addr,
Int128 cmpv, Int128 newv, uint32_t oi)
{
#if TCG_TARGET_REG_BITS == 32
uintptr_t ra = GETPC();
Int128 oldv;
oldv = cpu_ld16_be_mmu(env, addr, oi, ra);
oldv = cpu_ld16_mmu(env, addr, oi, ra);
if (int128_eq(oldv, cmpv)) {
cpu_st16_be_mmu(env, addr, newv, oi, ra);
} else {
/* Even with comparison failure, still need a write cycle. */
probe_write(env, addr, 16, get_mmuidx(oi), ra);
}
return oldv;
#else
g_assert_not_reached();
#endif
}
Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr,
Int128 cmpv, Int128 newv, uint32_t oi)
{
#if TCG_TARGET_REG_BITS == 32
uintptr_t ra = GETPC();
Int128 oldv;
oldv = cpu_ld16_le_mmu(env, addr, oi, ra);
if (int128_eq(oldv, cmpv)) {
cpu_st16_le_mmu(env, addr, newv, oi, ra);
cpu_st16_mmu(env, addr, newv, oi, ra);
} else {
/* Even with comparison failure, still need a write cycle. */
probe_write(env, addr, 16, get_mmuidx(oi), ra);
@ -103,7 +69,7 @@ Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr,
}
#define ATOMIC_HELPER(OP, TYPE) \
TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr, \
TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, uint64_t addr, \
TYPE val, uint32_t oi) \
{ return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); }
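For one concrete instantiation, CMPXCHG_HELPER(cmpxchgl_be, uint32_t) expands to roughly the following (HELPER(x) resolves to helper_x in QEMU), with the guest address now fixed at uint64_t instead of the target-dependent tl:

uint32_t helper_atomic_cmpxchgl_be(CPUArchState *env, uint64_t addr,
                                   uint32_t oldv, uint32_t newv, uint32_t oi)
{
    /* Defer to the MMU-aware primitive; GETPC() captures the host
     * return address for precise exception unwinding. */
    return cpu_atomic_cmpxchgl_be_mmu(env, addr, oldv, newv, oi, GETPC());
}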

View File

@ -73,8 +73,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
DATA_TYPE ret;
#if DATA_SIZE == 16
@ -87,38 +86,11 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
return ret;
}
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ, retaddr);
DATA_TYPE val;
val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
atomic_trace_ld_post(env, addr, oi);
return val;
}
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_WRITE, retaddr);
atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_st_post(env, addr, oi);
}
#endif
#else
#if DATA_SIZE < 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
DATA_TYPE ret;
ret = qatomic_xchg__nocheck(haddr, val);
@ -131,9 +103,8 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
DATA_TYPE *haddr, ret; \
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
ret = qatomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \
@ -163,9 +134,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE cmp, old, new, val = xval; \
XDATA_TYPE *haddr, cmp, old, new, val = xval; \
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
smp_mb(); \
cmp = qatomic_read__nocheck(haddr); \
do { \
@ -188,7 +158,7 @@ GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA SIZE >= 16 */
#endif /* DATA SIZE < 16 */
#undef END
@ -206,8 +176,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
DATA_TYPE ret;
#if DATA_SIZE == 16
@ -220,39 +189,11 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
return BSWAP(ret);
}
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ, retaddr);
DATA_TYPE val;
val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
atomic_trace_ld_post(env, addr, oi);
return BSWAP(val);
}
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_WRITE, retaddr);
val = BSWAP(val);
atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_st_post(env, addr, oi);
}
#endif
#else
#if DATA_SIZE < 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
ABI_TYPE ret;
ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
@ -265,9 +206,8 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
DATA_TYPE *haddr, ret; \
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
ret = qatomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \
@ -294,9 +234,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE ldo, ldn, old, new, val = xval; \
XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
smp_mb(); \
ldn = qatomic_read__nocheck(haddr); \
do { \
@ -326,7 +265,7 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD
#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE >= 16 */
#endif /* DATA_SIZE < 16 */
#undef END
#endif /* DATA_SIZE > 1 */

View File

@ -21,6 +21,8 @@
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"
#include "exec/exec-all.h"
#include "qemu/plugin.h"
#include "internal.h"
bool tcg_allowed;
@ -65,6 +67,8 @@ void cpu_loop_exit(CPUState *cpu)
{
/* Undo the setting in cpu_tb_exec. */
cpu->can_do_io = 1;
/* Undo any setting in generated code. */
qemu_plugin_disable_mem_helpers(cpu);
siglongjmp(cpu->jmp_env, 1);
}
@ -78,6 +82,8 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)
{
/* Prevent looping if already executing in a serial context. */
g_assert(!cpu_in_serial_context(cpu));
cpu->exception_index = EXCP_ATOMIC;
cpu_loop_exit_restore(cpu, pc);
}

View File

@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
@ -28,7 +27,6 @@
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
@ -38,7 +36,7 @@
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"
#include "exec/replay-core.h"
#include "sysemu/tcg.h"
#include "exec/helper-proto.h"
#include "tb-jmp-cache.h"
@ -64,8 +62,8 @@ typedef struct SyncClocks {
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
static int64_t max_delay;
static int64_t max_advance;
int64_t max_delay;
int64_t max_advance;
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
@ -161,7 +159,7 @@ uint32_t curr_cflags(CPUState *cpu)
*/
if (unlikely(cpu->singlestep_enabled)) {
cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
} else if (singlestep) {
} else if (qatomic_read(&one_insn_per_tb)) {
cflags |= CF_NO_GOTO_TB | 1;
} else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
cflags |= CF_NO_GOTO_TB;
@ -185,7 +183,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
tb_page_addr0(tb) == desc->page_addr0 &&
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
@ -237,7 +235,7 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
return NULL;
}
desc.page_addr0 = phys_pc;
h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : pc),
h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
flags, cflags, *cpu->trace_dstate);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
@ -256,10 +254,13 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
hash = tb_jmp_cache_hash_func(pc);
jc = cpu->tb_jmp_cache;
tb = tb_jmp_cache_get_tb(jc, hash);
if (cflags & CF_PCREL) {
/* Use acquire to ensure current load of pc from jc. */
tb = qatomic_load_acquire(&jc->array[hash].tb);
if (likely(tb &&
tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
jc->array[hash].pc == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
@ -270,7 +271,29 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
if (tb == NULL) {
return NULL;
}
tb_jmp_cache_set(jc, hash, tb, pc);
jc->array[hash].pc = pc;
/* Ensure pc is written first. */
qatomic_store_release(&jc->array[hash].tb, tb);
} else {
/* Use rcu_read to ensure current load of pc from *tb. */
tb = qatomic_rcu_read(&jc->array[hash].tb);
if (likely(tb &&
tb->pc == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
tb_cflags(tb) == cflags)) {
return tb;
}
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return NULL;
}
/* Use the pc value already stored in tb->pc. */
qatomic_set(&jc->array[hash].tb, tb);
}
return tb;
}
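Distilled, the CF_PCREL branch above is a publish/consume pairing on the jump-cache slot; a sketch using the same qatomic wrappers as the patch:

/* Writer: publish pc strictly before tb. */
jc->array[hash].pc = pc;
qatomic_store_release(&jc->array[hash].tb, tb);

/* Reader: acquiring tb makes the earlier pc store visible. */
tb = qatomic_load_acquire(&jc->array[hash].tb);
if (tb && jc->array[hash].pc == pc) {
    /* consistent hit */
}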
@ -284,7 +307,6 @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu,
cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
tb->flags, tb->cflags, lookup_symbol(pc));
#if defined(DEBUG_DISAS)
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
FILE *logfile = qemu_log_trylock();
if (logfile) {
@ -300,7 +322,6 @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu,
qemu_log_unlock(logfile);
}
}
#endif /* DEBUG_DISAS */
}
}
@ -436,6 +457,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
qemu_thread_jit_execute();
ret = tcg_qemu_tb_exec(env, tb_ptr);
cpu->can_do_io = 1;
qemu_plugin_disable_mem_helpers(cpu);
/*
* TODO: Delay swapping back to the read-write region of the TB
* until we actually need to modify the TB. The read-only copy,
@ -459,9 +481,9 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
if (cc->tcg_ops->synchronize_from_tb) {
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
} else {
assert(!TARGET_TB_PCREL);
tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
assert(cc->set_pc);
cc->set_pc(cpu, tb_pc(last_tb));
cc->set_pc(cpu, last_tb->pc);
}
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
target_ulong pc = log_pc(cpu, last_tb);
@ -503,7 +525,6 @@ static void cpu_exec_exit(CPUState *cpu)
if (cc->tcg_ops->cpu_exec_exit) {
cc->tcg_ops->cpu_exec_exit(cpu);
}
QEMU_PLUGIN_ASSERT(cpu->plugin_mem_cbs == NULL);
}
void cpu_exec_step_atomic(CPUState *cpu)
@ -557,7 +578,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
qemu_mutex_unlock_iothread();
}
assert_no_pages_locked();
qemu_plugin_disable_mem_helpers(cpu);
}
/*
@ -979,17 +999,27 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
CPUJumpCache *jc;
uint32_t h;
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
mmap_unlock();
/*
* We add the TB to the virtual-PC hash table for fast lookup.
*/
h = tb_jmp_cache_hash_func(pc);
tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
jc = cpu->tb_jmp_cache;
if (cflags & CF_PCREL) {
jc->array[h].pc = pc;
/* Ensure pc is written first. */
qatomic_store_release(&jc->array[h].tb, tb);
} else {
/* Use the pc value already stored in tb->pc. */
qatomic_set(&jc->array[h].tb, tb);
}
}
#ifndef CONFIG_USER_ONLY
@ -1012,7 +1042,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
#if !TARGET_TB_PCREL
if (last_tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
mmap_lock();
TranslationBlock *edge = libafl_gen_edge(cpu, tb_pc(last_tb), tb_pc(tb),
TranslationBlock *edge = libafl_gen_edge(cpu, last_tb->pc, tb->pc,
tb_exit, cs_base, flags, cflags);
mmap_unlock();
@ -1035,7 +1065,6 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
QEMU_PLUGIN_ASSERT(cpu->plugin_mem_cbs == NULL);
/* Try to align the host and virtual clocks
if the guest is in advance */
align_clocks(sc, cpu);
@ -1060,7 +1089,6 @@ static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
qemu_plugin_disable_mem_helpers(cpu);
assert_no_pages_locked();
}
@ -1133,86 +1161,3 @@ void tcg_exec_unrealizefn(CPUState *cpu)
tlb_destroy(cpu);
g_free_rcu(cpu->tb_jmp_cache, rcu);
}
#ifndef CONFIG_USER_ONLY
static void dump_drift_info(GString *buf)
{
if (!icount_enabled()) {
return;
}
g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n",
(cpu_get_clock() - icount_get()) / SCALE_MS);
if (icount_align_option) {
g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n",
-max_delay / SCALE_MS);
g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n",
max_advance / SCALE_MS);
} else {
g_string_append_printf(buf, "Max guest delay NA\n");
g_string_append_printf(buf, "Max guest advance NA\n");
}
}
HumanReadableText *qmp_x_query_jit(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
if (!tcg_enabled()) {
error_setg(errp, "JIT information is only available with accel=tcg");
return NULL;
}
dump_exec_info(buf);
dump_drift_info(buf);
return human_readable_text_from_str(buf);
}
HumanReadableText *qmp_x_query_opcount(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
if (!tcg_enabled()) {
error_setg(errp, "Opcode count information is only available with accel=tcg");
return NULL;
}
tcg_dump_op_count(buf);
return human_readable_text_from_str(buf);
}
#ifdef CONFIG_PROFILER
int64_t dev_time;
HumanReadableText *qmp_x_query_profile(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
static int64_t last_cpu_exec_time;
int64_t cpu_exec_time;
int64_t delta;
cpu_exec_time = tcg_cpu_exec_time();
delta = cpu_exec_time - last_cpu_exec_time;
g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n",
dev_time, dev_time / (double)NANOSECONDS_PER_SECOND);
g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n",
delta, delta / (double)NANOSECONDS_PER_SECOND);
last_cpu_exec_time = cpu_exec_time;
dev_time = 0;
return human_readable_text_from_str(buf);
}
#else
HumanReadableText *qmp_x_query_profile(Error **errp)
{
error_setg(errp, "Internal profiler not compiled");
return NULL;
}
#endif
#endif /* !CONFIG_USER_ONLY */

File diff suppressed because it is too large

View File

@ -1,14 +0,0 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "exec/exec-all.h"
#include "monitor/monitor.h"
static void hmp_tcg_register(void)
{
monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);
monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount);
}
type_init(hmp_tcg_register);

View File

@ -57,11 +57,25 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
/* Return the current PC from CPU, which may be cached in TB. */
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
{
#if TARGET_TB_PCREL
if (tb_cflags(tb) & CF_PCREL) {
return cpu->cc->get_pc(cpu);
#else
return tb_pc(tb);
#endif
} else {
return tb->pc;
}
}
/*
* Return true if CS is not running in parallel with other cpus, either
* because there are no other cpus or we are within an exclusive context.
*/
static inline bool cpu_in_serial_context(CPUState *cs)
{
return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs);
}
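A typical caller, taken from the tb_flush() change later in this commit: flush inline when the CPU runs serially, otherwise defer to an async-safe context.

if (cpu_in_serial_context(cpu)) {
    do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
} else {
    async_safe_run_on_cpu(cpu, do_tb_flush,
                          RUN_ON_CPU_HOST_INT(tb_flush_count));
}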
extern int64_t max_delay;
extern int64_t max_advance;
extern bool one_insn_per_tb;
#endif /* ACCEL_TCG_INTERNAL_H */

File diff suppressed because it is too large

View File

@ -26,7 +26,7 @@ uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
return cpu_ldw_be_mmu(env, addr, oi, ra);
return cpu_ldw_mmu(env, addr, oi, ra);
}
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
@ -39,21 +39,21 @@ uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
return cpu_ldl_be_mmu(env, addr, oi, ra);
return cpu_ldl_mmu(env, addr, oi, ra);
}
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
return cpu_ldq_be_mmu(env, addr, oi, ra);
return cpu_ldq_mmu(env, addr, oi, ra);
}
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
return cpu_ldw_le_mmu(env, addr, oi, ra);
return cpu_ldw_mmu(env, addr, oi, ra);
}
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
@ -66,14 +66,14 @@ uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
return cpu_ldl_le_mmu(env, addr, oi, ra);
return cpu_ldl_mmu(env, addr, oi, ra);
}
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
return cpu_ldq_le_mmu(env, addr, oi, ra);
return cpu_ldq_mmu(env, addr, oi, ra);
}
void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
@ -87,42 +87,42 @@ void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
cpu_stw_be_mmu(env, addr, val, oi, ra);
cpu_stw_mmu(env, addr, val, oi, ra);
}
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
cpu_stl_be_mmu(env, addr, val, oi, ra);
cpu_stl_mmu(env, addr, val, oi, ra);
}
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
cpu_stq_be_mmu(env, addr, val, oi, ra);
cpu_stq_mmu(env, addr, val, oi, ra);
}
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
cpu_stw_le_mmu(env, addr, val, oi, ra);
cpu_stw_mmu(env, addr, val, oi, ra);
}
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
cpu_stl_le_mmu(env, addr, val, oi, ra);
cpu_stl_mmu(env, addr, val, oi, ra);
}
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
cpu_stq_le_mmu(env, addr, val, oi, ra);
cpu_stq_mmu(env, addr, val, oi, ra);
}
/*--------------------------*/

View File

@ -18,7 +18,7 @@ specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
'cputlb.c',
'hmp.c',
'monitor.c',
))
tcg_module_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(

accel/tcg/monitor.c Normal file
View File

@ -0,0 +1,120 @@
/*
* SPDX-License-Identifier: LGPL-2.1-or-later
*
* QEMU TCG monitor
*
* Copyright (c) 2003-2005 Fabrice Bellard
*/
#include "qemu/osdep.h"
#include "qemu/accel.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "qapi/qapi-commands-machine.h"
#include "monitor/monitor.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
#include "internal.h"
static void dump_drift_info(GString *buf)
{
if (!icount_enabled()) {
return;
}
g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n",
(cpu_get_clock() - icount_get()) / SCALE_MS);
if (icount_align_option) {
g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n",
-max_delay / SCALE_MS);
g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n",
max_advance / SCALE_MS);
} else {
g_string_append_printf(buf, "Max guest delay NA\n");
g_string_append_printf(buf, "Max guest advance NA\n");
}
}
static void dump_accel_info(GString *buf)
{
AccelState *accel = current_accel();
bool one_insn_per_tb = object_property_get_bool(OBJECT(accel),
"one-insn-per-tb",
&error_fatal);
g_string_append_printf(buf, "Accelerator settings:\n");
g_string_append_printf(buf, "one-insn-per-tb: %s\n\n",
one_insn_per_tb ? "on" : "off");
}
HumanReadableText *qmp_x_query_jit(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
if (!tcg_enabled()) {
error_setg(errp, "JIT information is only available with accel=tcg");
return NULL;
}
dump_accel_info(buf);
dump_exec_info(buf);
dump_drift_info(buf);
return human_readable_text_from_str(buf);
}
HumanReadableText *qmp_x_query_opcount(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
if (!tcg_enabled()) {
error_setg(errp,
"Opcode count information is only available with accel=tcg");
return NULL;
}
tcg_dump_op_count(buf);
return human_readable_text_from_str(buf);
}
#ifdef CONFIG_PROFILER
int64_t dev_time;
HumanReadableText *qmp_x_query_profile(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
static int64_t last_cpu_exec_time;
int64_t cpu_exec_time;
int64_t delta;
cpu_exec_time = tcg_cpu_exec_time();
delta = cpu_exec_time - last_cpu_exec_time;
g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n",
dev_time, dev_time / (double)NANOSECONDS_PER_SECOND);
g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n",
delta, delta / (double)NANOSECONDS_PER_SECOND);
last_cpu_exec_time = cpu_exec_time;
dev_time = 0;
return human_readable_text_from_str(buf);
}
#else
HumanReadableText *qmp_x_query_profile(Error **errp)
{
error_setg(errp, "Internal profiler not compiled");
return NULL;
}
#endif
static void hmp_tcg_register(void)
{
monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);
monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount);
}
type_init(hmp_tcg_register);

View File

@ -328,7 +328,7 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
for (insn = 0; insn < tb->icount; insn++) {
/* FIXME: This replicates the restore_state_to_opc() logic. */
q[insn].address = tcg_ctx->gen_insn_data[insn][0];
if (TARGET_TB_PCREL) {
if (tb_cflags(tb) & CF_PCREL) {
q[insn].address |= (guest_pc & TARGET_PAGE_MASK);
} else {
#if defined(TARGET_I386)

View File

@ -44,6 +44,7 @@
*/
#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
@ -91,30 +92,12 @@ void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
void *userdata)
{ }
static void do_gen_mem_cb(TCGv vaddr, uint32_t info)
{
TCGv_i32 cpu_index = tcg_temp_new_i32();
TCGv_i32 meminfo = tcg_const_i32(info);
TCGv_i64 vaddr64 = tcg_temp_new_i64();
TCGv_ptr udata = tcg_const_ptr(NULL);
tcg_gen_ld_i32(cpu_index, cpu_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
tcg_gen_extu_tl_i64(vaddr64, vaddr);
gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata);
tcg_temp_free_ptr(udata);
tcg_temp_free_i64(vaddr64);
tcg_temp_free_i32(meminfo);
tcg_temp_free_i32(cpu_index);
}
static void gen_empty_udata_cb(void)
{
TCGv_i32 cpu_index = tcg_temp_new_i32();
TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
TCGv_ptr udata = tcg_temp_ebb_new_ptr();
tcg_gen_movi_ptr(udata, 0);
tcg_gen_ld_i32(cpu_index, cpu_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);
@ -129,9 +112,10 @@ static void gen_empty_udata_cb(void)
*/
static void gen_empty_inline_cb(void)
{
TCGv_i64 val = tcg_temp_new_i64();
TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */
TCGv_i64 val = tcg_temp_ebb_new_i64();
TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
tcg_gen_movi_ptr(ptr, 0);
tcg_gen_ld_i64(val, ptr, 0);
/* pass an immediate != 0 so that it doesn't get optimized away */
tcg_gen_addi_i64(val, val, 0xdeadface);
@ -140,9 +124,22 @@ static void gen_empty_inline_cb(void)
tcg_temp_free_i64(val);
}
static void gen_empty_mem_cb(TCGv addr, uint32_t info)
static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
{
do_gen_mem_cb(addr, info);
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
TCGv_i32 meminfo = tcg_temp_ebb_new_i32();
TCGv_ptr udata = tcg_temp_ebb_new_ptr();
tcg_gen_movi_i32(meminfo, info);
tcg_gen_movi_ptr(udata, 0);
tcg_gen_ld_i32(cpu_index, cpu_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata);
tcg_temp_free_ptr(udata);
tcg_temp_free_i32(meminfo);
tcg_temp_free_i32(cpu_index);
}
/*
@ -151,9 +148,9 @@ static void gen_empty_mem_cb(TCGv addr, uint32_t info)
*/
static void gen_empty_mem_helper(void)
{
TCGv_ptr ptr;
TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
ptr = tcg_const_ptr(NULL);
tcg_gen_movi_ptr(ptr, 0);
tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
offsetof(ArchCPU, env));
tcg_temp_free_ptr(ptr);
@ -197,35 +194,17 @@ static void plugin_gen_empty_callback(enum plugin_gen_from from)
}
}
union mem_gen_fn {
void (*mem_fn)(TCGv, uint32_t);
void (*inline_fn)(void);
};
static void gen_mem_wrapped(enum plugin_gen_cb type,
const union mem_gen_fn *f, TCGv addr,
uint32_t info, bool is_mem)
void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info)
{
enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);
gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw);
if (is_mem) {
f->mem_fn(addr, info);
} else {
f->inline_fn();
}
gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_MEM, rw);
gen_empty_mem_cb(addr, info);
tcg_gen_plugin_cb_end();
}
void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info)
{
union mem_gen_fn fn;
fn.mem_fn = gen_empty_mem_cb;
gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true);
fn.inline_fn = gen_empty_inline_cb;
gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false);
gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_INLINE, rw);
gen_empty_inline_cb();
tcg_gen_plugin_cb_end();
}
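After dropping the union mem_gen_fn indirection, the resulting function reads roughly as:

void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info)
{
    enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);

    /* Placeholder for the per-access callback... */
    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_MEM, rw);
    gen_empty_mem_cb(addr, info);
    tcg_gen_plugin_cb_end();

    /* ...and for the inline (counter) variant. */
    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_INLINE, rw);
    gen_empty_inline_cb();
    tcg_gen_plugin_cb_end();
}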
static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
@ -275,33 +254,6 @@ static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
return op;
}
static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op)
{
if (TCG_TARGET_REG_BITS == 32) {
/* mov_i32 */
op = copy_op(begin_op, op, INDEX_op_mov_i32);
/* mov_i32 w/ $0 */
op = copy_op(begin_op, op, INDEX_op_mov_i32);
} else {
/* extu_i32_i64 */
op = copy_op(begin_op, op, INDEX_op_extu_i32_i64);
}
return op;
}
static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op)
{
if (TCG_TARGET_REG_BITS == 32) {
/* 2x mov_i32 */
op = copy_op(begin_op, op, INDEX_op_mov_i32);
op = copy_op(begin_op, op, INDEX_op_mov_i32);
} else {
/* mov_i64 */
op = copy_op(begin_op, op, INDEX_op_mov_i64);
}
return op;
}
static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
{
if (UINTPTR_MAX == UINT32_MAX) {
@ -316,18 +268,6 @@ static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
return op;
}
static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op)
{
if (TARGET_LONG_BITS == 32) {
/* extu_i32_i64 */
op = copy_extu_i32_i64(begin_op, op);
} else {
/* mov_i64 */
op = copy_mov_i64(begin_op, op);
}
return op;
}
static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{
if (TCG_TARGET_REG_BITS == 32) {
@ -472,9 +412,6 @@ static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
}
/* extu_tl_i64 */
op = copy_extu_tl_i64(&begin_op, op);
if (type == PLUGIN_GEN_CB_MEM) {
/* call */
op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb),
@ -626,8 +563,6 @@ static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
TCGv_ptr ptr;
/*
* We could emit the clearing unconditionally and be done. However, this can
* be wasteful if for instance plugins don't track memory accesses, or if
@ -640,10 +575,8 @@ void plugin_gen_disable_mem_helpers(void)
if (!tcg_ctx->plugin_tb->mem_helper) {
return;
}
ptr = tcg_const_ptr(NULL);
tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
offsetof(ArchCPU, env));
tcg_temp_free_ptr(ptr);
tcg_gen_st_ptr(tcg_constant_ptr(NULL), cpu_env,
offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env));
}
static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,

View File

@ -14,53 +14,15 @@
/*
* Accessed in parallel; all accesses to 'tb' must be atomic.
* For TARGET_TB_PCREL, accesses to 'pc' must be protected by
* a load_acquire/store_release to 'tb'.
* For CF_PCREL, accesses to 'pc' must be protected by a
* load_acquire/store_release to 'tb'.
*/
struct CPUJumpCache {
struct rcu_head rcu;
struct {
TranslationBlock *tb;
#if TARGET_TB_PCREL
target_ulong pc;
#endif
} array[TB_JMP_CACHE_SIZE];
};
static inline TranslationBlock *
tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash)
{
#if TARGET_TB_PCREL
/* Use acquire to ensure current load of pc from jc. */
return qatomic_load_acquire(&jc->array[hash].tb);
#else
/* Use rcu_read to ensure current load of pc from *tb. */
return qatomic_rcu_read(&jc->array[hash].tb);
#endif
}
static inline target_ulong
tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
{
#if TARGET_TB_PCREL
return jc->array[hash].pc;
#else
return tb_pc(tb);
#endif
}
static inline void
tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
TranslationBlock *tb, target_ulong pc)
{
#if TARGET_TB_PCREL
jc->array[hash].pc = pc;
/* Use store_release on tb to ensure pc is written first. */
qatomic_store_release(&jc->array[hash].tb, tb);
#else
/* Use the pc value already stored in tb->pc. */
qatomic_set(&jc->array[hash].tb, tb);
#endif
}
#endif /* ACCEL_TCG_TB_JMP_CACHE_H */
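With the #if TARGET_TB_PCREL guards gone, the pc field becomes unconditional and the accessor helpers are inlined at their call sites in cpu-exec.c (see the tb_lookup hunks earlier in this commit). The surviving struct is roughly:

struct CPUJumpCache {
    struct rcu_head rcu;
    struct {
        TranslationBlock *tb;
        target_ulong pc;    /* valid for CF_PCREL TBs; ordered via tb */
    } array[TB_JMP_CACHE_SIZE];
};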

View File

@ -19,9 +19,11 @@
#include "qemu/osdep.h"
#include "qemu/interval-tree.h"
#include "qemu/qtree.h"
#include "exec/cputlb.h"
#include "exec/log.h"
#include "exec/exec-all.h"
#include "exec/tb-flush.h"
#include "exec/translate-all.h"
#include "sysemu/tcg.h"
#include "tcg/tcg.h"
@ -44,7 +46,7 @@ static bool tb_cmp(const void *ap, const void *bp)
const TranslationBlock *a = ap;
const TranslationBlock *b = bp;
return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) &&
return ((tb_cflags(a) & CF_PCREL || a->pc == b->pc) &&
a->cs_base == b->cs_base &&
a->flags == b->flags &&
(tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
@ -125,29 +127,29 @@ static void tb_remove(TranslationBlock *tb)
}
/* TODO: For now, still shared with translate-all.c for system mode. */
#define PAGE_FOR_EACH_TB(start, end, pagedesc, T, N) \
for (T = foreach_tb_first(start, end), \
N = foreach_tb_next(T, start, end); \
#define PAGE_FOR_EACH_TB(start, last, pagedesc, T, N) \
for (T = foreach_tb_first(start, last), \
N = foreach_tb_next(T, start, last); \
T != NULL; \
T = N, N = foreach_tb_next(N, start, end))
T = N, N = foreach_tb_next(N, start, last))
typedef TranslationBlock *PageForEachNext;
static PageForEachNext foreach_tb_first(tb_page_addr_t start,
tb_page_addr_t end)
tb_page_addr_t last)
{
IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, end - 1);
IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, last);
return n ? container_of(n, TranslationBlock, itree) : NULL;
}
static PageForEachNext foreach_tb_next(PageForEachNext tb,
tb_page_addr_t start,
tb_page_addr_t end)
tb_page_addr_t last)
{
IntervalTreeNode *n;
if (tb) {
n = interval_tree_iter_next(&tb->itree, start, end - 1);
n = interval_tree_iter_next(&tb->itree, start, last);
if (n) {
return container_of(n, TranslationBlock, itree);
}
@ -313,12 +315,12 @@ struct page_entry {
* See also: page_collection_lock().
*/
struct page_collection {
GTree *tree;
QTree *tree;
struct page_entry *max;
};
typedef int PageForEachNext;
#define PAGE_FOR_EACH_TB(start, end, pagedesc, tb, n) \
#define PAGE_FOR_EACH_TB(start, last, pagedesc, tb, n) \
TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
#ifdef CONFIG_DEBUG_TCG
@ -466,7 +468,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
struct page_entry *pe;
PageDesc *pd;
pe = g_tree_lookup(set->tree, &index);
pe = q_tree_lookup(set->tree, &index);
if (pe) {
return false;
}
@ -477,7 +479,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
}
pe = page_entry_new(pd, index);
g_tree_insert(set->tree, &pe->index, pe);
q_tree_insert(set->tree, &pe->index, pe);
/*
* If this is either (1) the first insertion or (2) a page whose index
@ -509,30 +511,30 @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
}
/*
* Lock a range of pages ([@start,@end[) as well as the pages of all
* Lock a range of pages ([@start,@last]) as well as the pages of all
* intersecting TBs.
* Locking order: acquire locks in ascending order of page index.
*/
static struct page_collection *page_collection_lock(tb_page_addr_t start,
tb_page_addr_t end)
tb_page_addr_t last)
{
struct page_collection *set = g_malloc(sizeof(*set));
tb_page_addr_t index;
PageDesc *pd;
start >>= TARGET_PAGE_BITS;
end >>= TARGET_PAGE_BITS;
g_assert(start <= end);
last >>= TARGET_PAGE_BITS;
g_assert(start <= last);
set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL,
page_entry_destroy);
set->max = NULL;
assert_no_pages_locked();
retry:
g_tree_foreach(set->tree, page_entry_lock, NULL);
q_tree_foreach(set->tree, page_entry_lock, NULL);
for (index = start; index <= end; index++) {
for (index = start; index <= last; index++) {
TranslationBlock *tb;
PageForEachNext n;
@ -541,7 +543,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
continue;
}
if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
g_tree_foreach(set->tree, page_entry_unlock, NULL);
q_tree_foreach(set->tree, page_entry_unlock, NULL);
goto retry;
}
assert_page_locked(pd);
@ -550,7 +552,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
(tb_page_addr1(tb) != -1 &&
page_trylock_add(set, tb_page_addr1(tb)))) {
/* drop all locks, and reacquire in order */
g_tree_foreach(set->tree, page_entry_unlock, NULL);
q_tree_foreach(set->tree, page_entry_unlock, NULL);
goto retry;
}
}
@ -561,7 +563,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
static void page_collection_unlock(struct page_collection *set)
{
/* entries are unlocked and freed via page_entry_destroy */
g_tree_destroy(set->tree);
q_tree_destroy(set->tree);
g_free(set);
}
@ -744,7 +746,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
tcg_region_reset_all();
/* XXX: flush processor icache at this point if cache flush is expensive */
qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
qatomic_inc(&tb_ctx.tb_flush_count);
done:
mmap_unlock();
@ -756,9 +758,9 @@ done:
void tb_flush(CPUState *cpu)
{
if (tcg_enabled()) {
unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
unsigned tb_flush_count = qatomic_read(&tb_ctx.tb_flush_count);
if (cpu_in_exclusive_context(cpu)) {
if (cpu_in_serial_context(cpu)) {
do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
} else {
async_safe_run_on_cpu(cpu, do_tb_flush,
@ -847,13 +849,13 @@ static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
{
CPUState *cpu;
if (TARGET_TB_PCREL) {
if (tb_cflags(tb) & CF_PCREL) {
/* A TB may be at any virtual address */
CPU_FOREACH(cpu) {
tcg_flush_jmp_cache(cpu);
}
} else {
uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb));
uint32_t h = tb_jmp_cache_hash_func(tb->pc);
CPU_FOREACH(cpu) {
CPUJumpCache *jc = cpu->tb_jmp_cache;
@ -885,7 +887,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
/* remove the TB from the hash list */
phys_pc = tb_page_addr0(tb);
h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
h = tb_hash_func(phys_pc, (orig_cflags & CF_PCREL ? 0 : tb->pc),
tb->flags, orig_cflags, tb->trace_vcpu_dstate);
if (!qht_remove(&tb_ctx.htable, tb, h)) {
return;
@ -966,7 +968,7 @@ TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
tb_record(tb, p, p2);
/* add in the hash table */
h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
h = tb_hash_func(phys_pc, (tb->cflags & CF_PCREL ? 0 : tb->pc),
tb->flags, tb->cflags, tb->trace_vcpu_dstate);
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
@ -989,14 +991,14 @@ TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
* Called with mmap_lock held for user-mode emulation.
* NOTE: this function must not be called while a TB is running.
*/
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
{
TranslationBlock *tb;
PageForEachNext n;
assert_memory_lock();
PAGE_FOR_EACH_TB(start, end, unused, tb, n) {
PAGE_FOR_EACH_TB(start, last, unused, tb, n) {
tb_phys_invalidate__locked(tb);
}
}
@ -1008,11 +1010,11 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
*/
void tb_invalidate_phys_page(tb_page_addr_t addr)
{
tb_page_addr_t start, end;
tb_page_addr_t start, last;
start = addr & TARGET_PAGE_MASK;
end = start + TARGET_PAGE_SIZE;
tb_invalidate_phys_range(start, end);
last = addr | ~TARGET_PAGE_MASK;
tb_invalidate_phys_range(start, last);
}
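A worked example with 4 KiB pages (TARGET_PAGE_MASK == ~0xfff), showing why the inclusive bound is addr | ~TARGET_PAGE_MASK:

/* addr  = 0x1234
 * start = 0x1234 & ~0xfff = 0x1000   (first byte of the page)
 * last  = 0x1234 |  0xfff = 0x1fff   (last byte, inclusive)
 */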
/*
@ -1028,6 +1030,7 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
bool current_tb_modified;
TranslationBlock *tb;
PageForEachNext n;
tb_page_addr_t last;
/*
* Without precise smc semantics, or when outside of a TB,
@ -1044,10 +1047,11 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
assert_memory_lock();
current_tb = tcg_tb_lookup(pc);
last = addr | ~TARGET_PAGE_MASK;
addr &= TARGET_PAGE_MASK;
current_tb_modified = false;
PAGE_FOR_EACH_TB(addr, addr + TARGET_PAGE_SIZE, unused, tb, n) {
PAGE_FOR_EACH_TB(addr, last, unused, tb, n) {
if (current_tb == tb &&
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
/*
@ -1079,11 +1083,10 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
PageDesc *p, tb_page_addr_t start,
tb_page_addr_t end,
tb_page_addr_t last,
uintptr_t retaddr)
{
TranslationBlock *tb;
tb_page_addr_t tb_start, tb_end;
PageForEachNext n;
#ifdef TARGET_HAS_PRECISE_SMC
bool current_tb_modified = false;
@ -1091,22 +1094,22 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
#endif /* TARGET_HAS_PRECISE_SMC */
/*
* We remove all the TBs in the range [start, end[.
* We remove all the TBs in the range [start, last].
* XXX: see if in some cases it could be faster to invalidate all the code
*/
PAGE_FOR_EACH_TB(start, end, p, tb, n) {
PAGE_FOR_EACH_TB(start, last, p, tb, n) {
tb_page_addr_t tb_start, tb_last;
/* NOTE: this is subtle as a TB may span two physical pages */
if (n == 0) {
/* NOTE: tb_end may be after the end of the page, but
it is not a problem */
tb_start = tb_page_addr0(tb);
tb_end = tb_start + tb->size;
tb_last = tb_start + tb->size - 1;
if (n == 0) {
tb_last = MIN(tb_last, tb_start | ~TARGET_PAGE_MASK);
} else {
tb_start = tb_page_addr1(tb);
tb_end = tb_start + ((tb_page_addr0(tb) + tb->size)
& ~TARGET_PAGE_MASK);
tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK);
}
if (!(tb_end <= start || tb_start >= end)) {
if (!(tb_last < start || tb_start > last)) {
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb == tb &&
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
@ -1148,7 +1151,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
void tb_invalidate_phys_page(tb_page_addr_t addr)
{
struct page_collection *pages;
tb_page_addr_t start, end;
tb_page_addr_t start, last;
PageDesc *p;
p = page_find(addr >> TARGET_PAGE_BITS);
@ -1157,35 +1160,37 @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
}
start = addr & TARGET_PAGE_MASK;
end = start + TARGET_PAGE_SIZE;
pages = page_collection_lock(start, end);
tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
last = addr | ~TARGET_PAGE_MASK;
pages = page_collection_lock(start, last);
tb_invalidate_phys_page_range__locked(pages, p, start, last, 0);
page_collection_unlock(pages);
}
/*
* Invalidate all TBs which intersect with the target physical address range
* [start;end[. NOTE: start and end may refer to *different* physical pages.
* [start;last]. NOTE: start and last may refer to *different* physical pages.
* 'is_cpu_write_access' should be true if called from a real cpu write
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*/
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
{
struct page_collection *pages;
tb_page_addr_t next;
tb_page_addr_t index, index_last;
pages = page_collection_lock(start, end);
for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
start < end;
start = next, next += TARGET_PAGE_SIZE) {
PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
tb_page_addr_t bound = MIN(next, end);
pages = page_collection_lock(start, last);
index_last = last >> TARGET_PAGE_BITS;
for (index = start >> TARGET_PAGE_BITS; index <= index_last; index++) {
PageDesc *pd = page_find(index);
tb_page_addr_t bound;
if (pd == NULL) {
continue;
}
assert_page_locked(pd);
bound = (index << TARGET_PAGE_BITS) | ~TARGET_PAGE_MASK;
bound = MIN(bound, last);
tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
}
page_collection_unlock(pages);
@ -1206,7 +1211,7 @@ static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
}
assert_page_locked(p);
tb_invalidate_phys_page_range__locked(pages, p, start, start + len, ra);
tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra);
}
/*
@ -1220,7 +1225,7 @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
{
struct page_collection *pages;
pages = page_collection_lock(ram_addr, ram_addr + size);
pages = page_collection_lock(ram_addr, ram_addr + size - 1);
tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
page_collection_unlock(pages);
}

View File

@ -89,7 +89,20 @@ void icount_handle_deadline(void)
}
}
void icount_prepare_for_run(CPUState *cpu)
/* Distribute the budget evenly across all CPUs */
int64_t icount_percpu_budget(int cpu_count)
{
int64_t limit = icount_get_limit();
int64_t timeslice = limit / cpu_count;
if (timeslice == 0) {
timeslice = limit;
}
return timeslice;
}
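A quick numeric check of the fallback, under assumed values:

/* limit = 100000, 4 vCPUs -> timeslice of 25000 each.
 * limit = 3,      4 vCPUs -> 3 / 4 == 0, so each vCPU gets the
 * full limit of 3 rather than a starving budget of zero. */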
void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
{
int insns_left;
@ -101,13 +114,13 @@ void icount_prepare_for_run(CPUState *cpu)
g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
g_assert(cpu->icount_extra == 0);
cpu->icount_budget = icount_get_limit();
replay_mutex_lock();
cpu->icount_budget = MIN(icount_get_limit(), cpu_budget);
insns_left = MIN(0xffff, cpu->icount_budget);
cpu_neg(cpu)->icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left;
replay_mutex_lock();
if (cpu->icount_budget == 0) {
/*
* We're called without the iothread lock, so must take it while

View File

@ -11,7 +11,8 @@
#define TCG_ACCEL_OPS_ICOUNT_H
void icount_handle_deadline(void);
void icount_prepare_for_run(CPUState *cpu);
void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget);
int64_t icount_percpu_budget(int cpu_count);
void icount_process_data(CPUState *cpu);
void icount_handle_interrupt(CPUState *cpu, int mask);

View File

@ -24,6 +24,7 @@
*/
#include "qemu/osdep.h"
#include "qemu/lockable.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "sysemu/cpu-timers.h"
@ -71,11 +72,13 @@ static void rr_kick_next_cpu(void)
{
CPUState *cpu;
do {
cpu = qatomic_mb_read(&rr_current_cpu);
cpu = qatomic_read(&rr_current_cpu);
if (cpu) {
cpu_exit(cpu);
}
} while (cpu != qatomic_mb_read(&rr_current_cpu));
/* Finish kicking this cpu before reading again. */
smp_mb();
} while (cpu != qatomic_read(&rr_current_cpu));
}
static void rr_kick_thread(void *opaque)
@ -139,6 +142,33 @@ static void rr_force_rcu(Notifier *notify, void *data)
rr_kick_next_cpu();
}
/*
* Calculate the number of CPUs that we will process in a single iteration of
* the main CPU thread loop so that we can fairly distribute the instruction
* count across CPUs.
*
* The CPU count is cached based on the CPU list generation ID to avoid
* iterating the list every time.
*/
static int rr_cpu_count(void)
{
static unsigned int last_gen_id = ~0;
static int cpu_count;
CPUState *cpu;
QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
if (cpu_list_generation_id_get() != last_gen_id) {
cpu_count = 0;
CPU_FOREACH(cpu) {
++cpu_count;
}
last_gen_id = cpu_list_generation_id_get();
}
return cpu_count;
}
/*
* In the single-threaded case each vCPU is simulated in turn. If
* there is more than a single vCPU we create a simple timer to kick
@ -185,11 +215,16 @@ static void *rr_cpu_thread_fn(void *arg)
cpu->exit_request = 1;
while (1) {
/* Only used for icount_enabled() */
int64_t cpu_budget = 0;
qemu_mutex_unlock_iothread();
replay_mutex_lock();
qemu_mutex_lock_iothread();
if (icount_enabled()) {
int cpu_count = rr_cpu_count();
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
icount_account_warp_timer();
/*
@ -197,6 +232,8 @@ static void *rr_cpu_thread_fn(void *arg)
* waking up the I/O thread and waiting for completion.
*/
icount_handle_deadline();
cpu_budget = icount_percpu_budget(cpu_count);
}
replay_mutex_unlock();
@ -206,8 +243,9 @@ static void *rr_cpu_thread_fn(void *arg)
}
while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
/* Store rr_current_cpu before evaluating cpu_can_run(). */
qatomic_mb_set(&rr_current_cpu, cpu);
current_cpu = cpu;
qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
@ -218,7 +256,7 @@ static void *rr_cpu_thread_fn(void *arg)
qemu_mutex_unlock_iothread();
if (icount_enabled()) {
icount_prepare_for_run(cpu);
icount_prepare_for_run(cpu, cpu_budget);
}
r = tcg_cpus_exec(cpu);
if (icount_enabled()) {
@ -245,7 +283,7 @@ static void *rr_cpu_thread_fn(void *arg)
cpu = CPU_NEXT(cpu);
} /* while (cpu && !cpu->exit_request).. */
/* Does not need qatomic_mb_set because a spurious wakeup is okay. */
/* Does not need a memory barrier because a spurious wakeup is okay. */
qatomic_set(&rr_current_cpu, NULL);
if (cpu && cpu->exit_request) {

View File

@ -31,6 +31,7 @@
#include "sysemu/cpu-timers.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/hwaddr.h"
#include "exec/gdbstub.h"
@ -44,10 +45,21 @@
void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
{
uint32_t cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;
uint32_t cflags;
/*
* Include the cluster number in the hash we use to look up TBs.
* This is important because a TB that is valid for one cluster at
* a given physical address and set of CPU flags is not necessarily
* valid for another:
* the two clusters may have different views of physical memory, or
* may have different CPU features (eg FPU present or absent).
*/
cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;
cflags |= parallel ? CF_PARALLEL : 0;
cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
cpu->tcg_cflags = cflags;
cpu->tcg_cflags |= cflags;
}
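As an illustration of how the flags compose: a vCPU in cluster 1 running under MTTCG would accumulate, in addition to any flags already set,

/* hypothetical: cluster 1, parallel execution */
cpu->tcg_cflags |= (1 << CF_CLUSTER_SHIFT) | CF_PARALLEL;

while the single-threaded icount configuration would contribute CF_USE_ICOUNT instead of CF_PARALLEL.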
void tcg_cpus_destroy(CPUState *cpu)
@ -116,7 +128,7 @@ static inline int xlat_gdb_type(CPUState *cpu, int gdbtype)
return cputype;
}
static int tcg_insert_breakpoint(CPUState *cs, int type, hwaddr addr, hwaddr len)
static int tcg_insert_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
{
CPUState *cpu;
int err = 0;
@ -147,7 +159,7 @@ static int tcg_insert_breakpoint(CPUState *cs, int type, hwaddr addr, hwaddr len
}
}
static int tcg_remove_breakpoint(CPUState *cs, int type, hwaddr addr, hwaddr len)
static int tcg_remove_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
{
CPUState *cpu;
int err = 0;

View File

@ -25,12 +25,13 @@
#include "qemu/osdep.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "exec/replay-core.h"
#include "sysemu/cpu-timers.h"
#include "tcg/tcg.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/accel.h"
#include "qemu/atomic.h"
#include "qapi/qapi-builtin-visit.h"
#include "qemu/units.h"
#if !defined(CONFIG_USER_ONLY)
@ -42,6 +43,7 @@ struct TCGState {
AccelState parent_obj;
bool mttcg_enabled;
bool one_insn_per_tb;
int splitwx_enabled;
unsigned long tb_size;
};
@ -117,6 +119,7 @@ static void tcg_accel_instance_init(Object *obj)
}
bool mttcg_enabled;
bool one_insn_per_tb;
static int tcg_init_machine(MachineState *ms)
{
@ -216,6 +219,20 @@ static void tcg_set_splitwx(Object *obj, bool value, Error **errp)
s->splitwx_enabled = value;
}
static bool tcg_get_one_insn_per_tb(Object *obj, Error **errp)
{
TCGState *s = TCG_STATE(obj);
return s->one_insn_per_tb;
}
static void tcg_set_one_insn_per_tb(Object *obj, bool value, Error **errp)
{
TCGState *s = TCG_STATE(obj);
s->one_insn_per_tb = value;
/* Set the global also: this changes the behaviour */
qatomic_set(&one_insn_per_tb, value);
}
static int tcg_gdbstub_supported_sstep_flags(void)
{
/*
@ -253,6 +270,12 @@ static void tcg_accel_class_init(ObjectClass *oc, void *data)
tcg_get_splitwx, tcg_set_splitwx);
object_class_property_set_description(oc, "split-wx",
"Map jit pages into separate RW and RX regions");
object_class_property_add_bool(oc, "one-insn-per-tb",
tcg_get_one_insn_per_tb,
tcg_set_one_insn_per_tb);
object_class_property_set_description(oc, "one-insn-per-tb",
"Only put one guest insn in each translation block");
}
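For reference, the new property should be reachable through the usual accelerator syntax, e.g. "-accel tcg,one-insn-per-tb=on", the modern spelling of the legacy -singlestep behaviour.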
static const TypeInfo tcg_accel_type = {

View File

@ -550,6 +550,17 @@ void HELPER(gvec_ands)(void *d, void *a, uint64_t b, uint32_t desc)
clear_high(d, oprsz, desc);
}
void HELPER(gvec_andcs)(void *d, void *a, uint64_t b, uint32_t desc)
{
intptr_t oprsz = simd_oprsz(desc);
intptr_t i;
for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
*(uint64_t *)(d + i) = *(uint64_t *)(a + i) & ~b;
}
clear_high(d, oprsz, desc);
}
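The new scalar and-complement helper computes d[i] = a[i] & ~b for each 64-bit lane; for example:

/* a = 0xffff0000ffff0000, b = 0x00ff00ff00ff00ff
 * d = a & ~b = 0xff000000ff000000 */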
void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc)
{
intptr_t oprsz = simd_oprsz(desc);

View File

@ -39,62 +39,63 @@ DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env)
DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr)
#endif /* IN_HELPER_PROTO */
DEF_HELPER_FLAGS_3(ld_i128, TCG_CALL_NO_WG, i128, env, i64, i32)
DEF_HELPER_FLAGS_4(st_i128, TCG_CALL_NO_WG, void, env, i64, i128, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG,
i32, env, tl, i32, i32, i32)
i32, env, i64, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG,
i32, env, tl, i32, i32, i32)
i32, env, i64, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG,
i32, env, tl, i32, i32, i32)
i32, env, i64, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG,
i32, env, tl, i32, i32, i32)
i32, env, i64, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG,
i32, env, tl, i32, i32, i32)
i32, env, i64, i32, i32, i32)
#ifdef CONFIG_ATOMIC64
DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG,
i64, env, tl, i64, i64, i32)
i64, env, i64, i64, i64, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
i64, env, tl, i64, i64, i32)
i64, env, i64, i64, i64, i32)
#endif
#ifdef CONFIG_CMPXCHG128
DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG,
i128, env, tl, i128, i128, i32)
i128, env, i64, i128, i128, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG,
i128, env, tl, i128, i128, i32)
i128, env, i64, i128, i128, i32)
#endif
DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_be, TCG_CALL_NO_WG,
i128, env, tl, i128, i128, i32)
DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_le, TCG_CALL_NO_WG,
i128, env, tl, i128, i128, i32)
DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo, TCG_CALL_NO_WG,
i128, env, i64, i128, i128, i32)
#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPERS(NAME) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le), \
TCG_CALL_NO_WG, i64, env, tl, i64, i32) \
TCG_CALL_NO_WG, i64, env, i64, i64, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be), \
TCG_CALL_NO_WG, i64, env, tl, i64, i32)
TCG_CALL_NO_WG, i64, env, i64, i64, i32)
#else
#define GEN_ATOMIC_HELPERS(NAME) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \
TCG_CALL_NO_WG, i32, env, tl, i32, i32)
TCG_CALL_NO_WG, i32, env, i64, i32, i32)
#endif /* CONFIG_ATOMIC64 */
GEN_ATOMIC_HELPERS(fetch_add)
@ -217,6 +218,7 @@ DEF_HELPER_FLAGS_4(gvec_nor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_eqv, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ands, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_andcs, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_xors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_ors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)

View File

@ -47,11 +47,12 @@
#include "exec/cputlb.h"
#include "exec/translate-all.h"
#include "exec/translator.h"
#include "exec/tb-flush.h"
#include "qemu/bitmap.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "exec/log.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
@ -70,6 +71,31 @@
#include "tcg/tcg-internal.h"
#include "exec/helper-head.h"
#include "tcg/tcg-temp-internal.h"
/* Reintroduce the tcg_const_* helpers removed upstream; the LibAFL hooks below still rely on them. */
static TCGv_i64 tcg_const_i64(int64_t val)
{
TCGv_i64 t0;
t0 = tcg_temp_new_i64();
tcg_gen_movi_i64(t0, val);
return t0;
}
#if TARGET_LONG_BITS == 32
static TCGv_i32 tcg_const_i32(int32_t val)
{
TCGv_i32 t0;
t0 = tcg_temp_new_i32();
tcg_gen_movi_i32(t0, val);
return t0;
}
#define tcg_const_tl tcg_const_i32
#else
#define tcg_const_tl tcg_const_i64
#endif
target_ulong libafl_gen_cur_pc;
void libafl_helper_table_add(TCGHelperInfo* info);
@ -77,8 +103,8 @@ TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block,
target_ulong dst_block, int exit_n,
target_ulong cs_base, uint32_t flags,
int cflags);
void libafl_gen_read(TCGv addr, MemOpIdx oi);
void libafl_gen_write(TCGv addr, MemOpIdx oi);
void libafl_gen_read(TCGTemp *addr, MemOpIdx oi);
void libafl_gen_write(TCGTemp *addr, MemOpIdx oi);
void libafl_gen_cmp(target_ulong pc, TCGv op0, TCGv op1, MemOp ot);
void libafl_gen_backdoor(target_ulong pc);
@ -298,7 +324,7 @@ void libafl_add_read_hook(uint64_t (*gen)(target_ulong pc, MemOpIdx oi, uint64_t
}
}
void libafl_gen_read(TCGv addr, MemOpIdx oi)
void libafl_gen_read(TCGTemp *addr, MemOpIdx oi)
{
size_t size = 1 << (oi & MO_SIZE);
@ -317,11 +343,7 @@ void libafl_gen_read(TCGv addr, MemOpIdx oi)
TCGv_i64 tmp0 = tcg_const_i64(cur_id);
TCGv_i64 tmp1 = tcg_const_i64(hook->data);
TCGTemp *tmp2[3] = { tcgv_i64_temp(tmp0),
#if TARGET_LONG_BITS == 32
tcgv_i32_temp(addr),
#else
tcgv_i64_temp(addr),
#endif
addr,
tcgv_i64_temp(tmp1) };
tcg_gen_callN(func, NULL, 3, tmp2);
tcg_temp_free_i64(tmp0);
@ -331,11 +353,10 @@ void libafl_gen_read(TCGv addr, MemOpIdx oi)
TCGv tmp1 = tcg_const_tl(size);
TCGv_i64 tmp2 = tcg_const_i64(hook->data);
TCGTemp *tmp3[4] = { tcgv_i64_temp(tmp0),
addr,
#if TARGET_LONG_BITS == 32
tcgv_i32_temp(addr),
tcgv_i32_temp(tmp1),
#else
tcgv_i64_temp(addr),
tcgv_i64_temp(tmp1),
#endif
tcgv_i64_temp(tmp2) };
@ -413,7 +434,7 @@ void libafl_add_write_hook(uint64_t (*gen)(target_ulong pc, MemOpIdx oi, uint64_
}
}
void libafl_gen_write(TCGv addr, MemOpIdx oi)
void libafl_gen_write(TCGTemp *addr, MemOpIdx oi)
{
size_t size = 1 << (oi & MO_SIZE);
@ -432,11 +453,7 @@ void libafl_gen_write(TCGv addr, MemOpIdx oi)
TCGv_i64 tmp0 = tcg_const_i64(cur_id);
TCGv_i64 tmp1 = tcg_const_i64(hook->data);
TCGTemp *tmp2[3] = { tcgv_i64_temp(tmp0),
#if TARGET_LONG_BITS == 32
tcgv_i32_temp(addr),
#else
tcgv_i64_temp(addr),
#endif
addr,
tcgv_i64_temp(tmp1) };
tcg_gen_callN(func, NULL, 3, tmp2);
tcg_temp_free_i64(tmp0);
@ -446,11 +463,10 @@ void libafl_gen_write(TCGv addr, MemOpIdx oi)
TCGv tmp1 = tcg_const_tl(size);
TCGv_i64 tmp2 = tcg_const_i64(hook->data);
TCGTemp *tmp3[4] = { tcgv_i64_temp(tmp0),
addr,
#if TARGET_LONG_BITS == 32
tcgv_i32_temp(addr),
tcgv_i32_temp(tmp1),
#else
tcgv_i64_temp(addr),
tcgv_i64_temp(tmp1),
#endif
tcgv_i64_temp(tmp2) };
@ -648,9 +664,11 @@ QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
TBContext tb_ctx;
/* Encode VAL as a signed leb128 sequence at P.
Return P incremented past the encoded value. */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
/*
* Encode VAL as a signed leb128 sequence at P.
* Return P incremented past the encoded value.
*/
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{
int more, byte;
@ -668,21 +686,23 @@ static uint8_t *encode_sleb128(uint8_t *p, target_long val)
return p;
}
/* Decode a signed leb128 sequence at *PP; increment *PP past the
decoded value. Return the decoded value. */
static target_long decode_sleb128(const uint8_t **pp)
/*
* Decode a signed leb128 sequence at *PP; increment *PP past the
* decoded value. Return the decoded value.
*/
static int64_t decode_sleb128(const uint8_t **pp)
{
const uint8_t *p = *pp;
target_long val = 0;
int64_t val = 0;
int byte, shift = 0;
do {
byte = *p++;
val |= (target_ulong)(byte & 0x7f) << shift;
val |= (int64_t)(byte & 0x7f) << shift;
shift += 7;
} while (byte & 0x80);
if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
val |= -(target_ulong)1 << shift;
val |= -(int64_t)1 << shift;
}
*pp = p;
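As a sanity check on the retyping from target_long to int64_t, here is a minimal standalone round trip of the same signed LEB128 coding. The decoder body mirrors the helper above; the encoder body, which the hunk elides, is reconstructed from the standard sLEB128 algorithm, so treat this as an illustrative sketch only. Like QEMU itself, it assumes arithmetic right shift of negative values.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative standalone copies, not part of the diff. */
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;                  /* assumed arithmetic shift */
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;           /* continuation bit */
        }
        *p++ = byte;
    } while (more);
    return p;
}

static int64_t decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    int64_t val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (int64_t)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < 64 && (byte & 0x40)) {
        val |= -(int64_t)1 << shift;    /* sign-extend */
    }
    *pp = p;
    return val;
}

int main(void)
{
    uint8_t buf[10];
    const uint8_t *rp = buf;
    uint8_t *end = encode_sleb128(buf, -624485);

    assert(end - buf == 3);             /* 0x9b 0xf1 0x59 */
    assert(decode_sleb128(&rp) == -624485);
    printf("round trip OK, %td bytes\n", end - buf);
    return 0;
}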
@ -708,11 +728,11 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
int i, j, n;
for (i = 0, n = tb->icount; i < n; ++i) {
target_ulong prev;
uint64_t prev;
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
if (i == 0) {
prev = (!TARGET_TB_PCREL && j == 0 ? tb_pc(tb) : 0);
prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
} else {
prev = tcg_ctx->gen_insn_data[i - 1][j];
}
@ -747,8 +767,8 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
}
memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
if (!TARGET_TB_PCREL) {
data[0] = tb_pc(tb);
if (!(tb_cflags(tb) & CF_PCREL)) {
data[0] = tb->pc;
}
/*
@ -879,7 +899,7 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
//// --- End LibAFL code ---
gen_intermediate_code(env_cpu(env), tb, *max_insns, pc, host_pc);
gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
assert(tb->size != 0);
tcg_ctx->cpu = NULL;
*max_insns = tb->icount;
@ -1122,9 +1142,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
gen_code_buf = tcg_ctx->code_gen_ptr;
tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
#if !TARGET_TB_PCREL
if (!(cflags & CF_PCREL)) {
tb->pc = pc;
#endif
}
tb->cs_base = cs_base;
tb->flags = flags;
tb->cflags = cflags;
@ -1132,6 +1152,13 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tb_set_page_addr0(tb, phys_pc);
tb_set_page_addr1(tb, -1);
tcg_ctx->gen_tb = tb;
tcg_ctx->addr_type = TCG_TYPE_TL;
#ifdef CONFIG_SOFTMMU
tcg_ctx->page_bits = TARGET_PAGE_BITS;
tcg_ctx->page_mask = TARGET_PAGE_MASK;
tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
#endif
tb_overflow:
#ifdef CONFIG_PROFILER
@ -1197,8 +1224,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
}
/*
* For TARGET_TB_PCREL, attribute all executions of the generated
* code to its first mapping.
* For CF_PCREL, attribute all executions of the generated code
* to its first mapping.
*/
perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
@ -1209,7 +1236,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
qemu_log_in_addr_range(pc)) {
FILE *logfile = qemu_log_trylock();
@ -1232,7 +1258,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
/* Dump header and the first instruction */
fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
fprintf(logfile,
" -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
" -- guest addr 0x%016" PRIx64 " + tb prologue\n",
tcg_ctx->gen_insn_data[insn][0]);
chunk_start = tcg_ctx->gen_insn_end_off[insn];
disas(logfile, tb->tc.ptr, chunk_start);
@ -1245,7 +1271,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
while (insn < tb->icount) {
size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
if (chunk_end > chunk_start) {
fprintf(logfile, " -- guest addr 0x" TARGET_FMT_lx "\n",
fprintf(logfile, " -- guest addr 0x%016" PRIx64 "\n",
tcg_ctx->gen_insn_data[insn][0]);
disas(logfile, tb->tc.ptr + chunk_start,
chunk_end - chunk_start);
@ -1282,7 +1308,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
qemu_log_unlock(logfile);
}
}
#endif
qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
@ -1360,7 +1385,7 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
addr = get_page_addr_code(env, pc);
if (addr != -1) {
tb_invalidate_phys_range(addr, addr + 1);
tb_invalidate_phys_range(addr, addr);
}
}
}

View File

@ -16,11 +16,35 @@
#include "exec/log.h"
#include "exec/translator.h"
#include "exec/plugin-gen.h"
#include "sysemu/replay.h"
#include "exec/replay-core.h"
//// --- Begin LibAFL code ---
#include "tcg/tcg-internal.h"
#include "tcg/tcg-temp-internal.h"
// Reintroduce the tcg_const_* helpers that upstream QEMU removed; the LibAFL hooks below still rely on them
static TCGv_i64 tcg_const_i64(int64_t val)
{
TCGv_i64 t0;
t0 = tcg_temp_new_i64();
tcg_gen_movi_i64(t0, val);
return t0;
}
#if TARGET_LONG_BITS == 32
static TCGv_i32 tcg_const_i32(int32_t val)
{
TCGv_i32 t0;
t0 = tcg_temp_new_i32();
tcg_gen_movi_i32(t0, val);
return t0;
}
#define tcg_const_tl tcg_const_i32
#else
#define tcg_const_tl tcg_const_i64
#endif
extern target_ulong libafl_gen_cur_pc;
@ -54,19 +78,6 @@ extern struct libafl_backdoor_hook* libafl_backdoor_hooks;
//// --- End LibAFL code ---
/* Pairs with tcg_clear_temp_count.
To be called by #TranslatorOps.{translate_insn,tb_stop} if
(1) the target is sufficiently clean to support reporting,
(2) as and when all temporaries are known to be consumed.
For most targets, (2) is at the end of translate_insn. */
void translator_loop_temp_check(DisasContextBase *db)
{
if (tcg_check_temp_count()) {
qemu_log("warning: TCG temporary leaks before "
TARGET_FMT_lx "\n", db->pc_next);
}
}
bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
{
/* Suppress goto_tb if requested. */
@ -78,7 +89,7 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
}
void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc,
const TranslatorOps *ops, DisasContextBase *db)
{
@ -91,7 +102,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
db->pc_next = pc;
db->is_jmp = DISAS_NEXT;
db->num_insns = 0;
db->max_insns = max_insns;
db->max_insns = *max_insns;
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
db->host_addr[0] = host_pc;
db->host_addr[1] = NULL;
@ -103,9 +114,6 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
ops->init_disas_context(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
/* Reset the temp count so that we can identify leaks */
tcg_clear_temp_count();
/* Start translating. */
gen_tb_start(db->tb);
ops->tb_start(db, cpu);
@ -114,7 +122,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY);
while (true) {
db->num_insns++;
*max_insns = ++db->num_insns;
ops->insn_start(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
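A note on this hunk: translator_loop now takes max_insns by pointer and publishes the running count on every iteration (*max_insns = ++db->num_insns), so even if translation stops early the caller observes how many instructions were actually started rather than the requested budget; compare the *max_insns = tb->icount assignment in setjmp_gen_code above.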
@ -248,7 +256,6 @@ post_translate_insn:
tb->size = db->pc_next - db->pc_first;
tb->icount = db->num_insns;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(db->pc_first)) {
FILE *logfile = qemu_log_trylock();
@ -259,7 +266,6 @@ post_translate_insn:
qemu_log_unlock(logfile);
}
}
#endif
}
static void *translator_access(CPUArchState *env, DisasContextBase *db,

View File

@ -1,6 +1,6 @@
#include "qemu/osdep.h"
#include "hw/core/cpu.h"
#include "sysemu/replay.h"
#include "exec/replay-core.h"
bool enable_cpu_pm = false;

View File

@ -480,24 +480,22 @@ static bool pageflags_set_clear(target_ulong start, target_ulong last,
* The flag PAGE_WRITE_ORG is positioned automatically depending
* on PAGE_WRITE. The mmap_lock should already be held.
*/
void page_set_flags(target_ulong start, target_ulong end, int flags)
void page_set_flags(target_ulong start, target_ulong last, int flags)
{
target_ulong last;
bool reset = false;
bool inval_tb = false;
/* This function should never be called with addresses outside the
guest address space. If this assert fires, it probably indicates
a missing call to h2g_valid. */
assert(start < end);
assert(end - 1 <= GUEST_ADDR_MAX);
assert(start <= last);
assert(last <= GUEST_ADDR_MAX);
/* Only set PAGE_ANON with new mappings. */
assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
assert_memory_lock();
start = start & TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);
last = end - 1;
start &= TARGET_PAGE_MASK;
last |= ~TARGET_PAGE_MASK;
if (!(flags & PAGE_VALID)) {
flags = 0;
@ -510,7 +508,7 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
}
if (!flags || reset) {
page_reset_target_data(start, end);
page_reset_target_data(start, last);
inval_tb |= pageflags_unset(start, last);
}
if (flags) {
@ -518,7 +516,7 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
~(reset ? 0 : PAGE_STICKY));
}
if (inval_tb) {
tb_invalidate_phys_range(start, end);
tb_invalidate_phys_range(start, last);
}
}
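Worked example of the new inclusive [start, last] convention, assuming 4 KiB target pages (TARGET_PAGE_MASK == ~0xfff): for start = 0x1234 and last = 0x3456, start &= TARGET_PAGE_MASK yields 0x1000 and last |= ~TARGET_PAGE_MASK yields 0x3fff, so the flags cover the three pages from 0x1000 through 0x3fff; the removed code derived the same range from an exclusive end via TARGET_PAGE_ALIGN(end) - 1.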
@ -761,13 +759,14 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}
int probe_access_flags(CPUArchState *env, target_ulong addr,
int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t ra)
{
int flags;
flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
*phost = flags ? NULL : g2h(env_cpu(env), addr);
return flags;
}
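The added g_assert(-(addr | TARGET_PAGE_MASK) >= size) is a compact page-crossing check: addr | TARGET_PAGE_MASK sets every bit above the page offset, so its two's-complement negation is exactly the number of bytes from addr to the end of the page. With 4 KiB pages and addr = 0x1ffe, for example, -(0x1ffe | ~0xfff) == 2, so only accesses of size <= 2 stay within the page and pass the assertion.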
@ -815,15 +814,14 @@ typedef struct TargetPageDataNode {
static IntervalTreeRoot targetdata_root;
void page_reset_target_data(target_ulong start, target_ulong end)
void page_reset_target_data(target_ulong start, target_ulong last)
{
IntervalTreeNode *n, *next;
target_ulong last;
assert_memory_lock();
start = start & TARGET_PAGE_MASK;
last = TARGET_PAGE_ALIGN(end) - 1;
start &= TARGET_PAGE_MASK;
last |= ~TARGET_PAGE_MASK;
for (n = interval_tree_iter_first(&targetdata_root, start, last),
next = n ? interval_tree_iter_next(n, start, last) : NULL;
@ -886,40 +884,14 @@ void *page_get_target_data(target_ulong address)
return t->data[(page - region) >> TARGET_PAGE_BITS];
}
#else
void page_reset_target_data(target_ulong start, target_ulong end) { }
void page_reset_target_data(target_ulong start, target_ulong last) { }
#endif /* TARGET_PAGE_DATA_SIZE */
/* The softmmu versions of these helpers are in cputlb.c. */
/*
* Verify that we have passed the correct MemOp to the correct function.
*
* We could present one function to target code, and dispatch based on
* the MemOp, but so far we have worked hard to avoid an indirect function
* call along the memory path.
*/
static void validate_memop(MemOpIdx oi, MemOp expected)
static void *cpu_mmu_lookup(CPUArchState *env, abi_ptr addr,
MemOp mop, uintptr_t ra, MMUAccessType type)
{
#ifdef CONFIG_DEBUG_TCG
MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
assert(have == expected);
#endif
}
void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}
void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}
static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
void *ret;
@ -933,251 +905,320 @@ static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
return ret;
}
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
#include "ldst_atomicity.c.inc"
static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr,
MemOp mop, uintptr_t ra)
{
void *haddr;
uint8_t ret;
validate_memop(oi, MO_UB);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
tcg_debug_assert((mop & MO_SIZE) == MO_8);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = ldub_p(haddr);
clear_helper_retaddr();
return ret;
}
tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return do_ld1_mmu(env, addr, get_memop(oi), ra);
}
tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return (int8_t)do_ld1_mmu(env, addr, get_memop(oi), ra);
}
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
uint8_t ret = do_ld1_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
MemOp mop, uintptr_t ra)
{
void *haddr;
uint16_t ret;
validate_memop(oi, MO_BEUW);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = lduw_be_p(haddr);
tcg_debug_assert((mop & MO_SIZE) == MO_16);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_2(env, ra, haddr, mop);
clear_helper_retaddr();
if (mop & MO_BSWAP) {
ret = bswap16(ret);
}
return ret;
}
tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return do_ld2_mmu(env, addr, get_memop(oi), ra);
}
tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return (int16_t)do_ld2_mmu(env, addr, get_memop(oi), ra);
}
uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
uint16_t ret = do_ld2_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
MemOp mop, uintptr_t ra)
{
void *haddr;
uint32_t ret;
validate_memop(oi, MO_BEUL);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldl_be_p(haddr);
tcg_debug_assert((mop & MO_SIZE) == MO_32);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_4(env, ra, haddr, mop);
clear_helper_retaddr();
if (mop & MO_BSWAP) {
ret = bswap32(ret);
}
return ret;
}
tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return do_ld4_mmu(env, addr, get_memop(oi), ra);
}
tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return (int32_t)do_ld4_mmu(env, addr, get_memop(oi), ra);
}
uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
uint32_t ret = do_ld4_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
MemOp mop, uintptr_t ra)
{
void *haddr;
uint64_t ret;
validate_memop(oi, MO_BEUQ);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldq_be_p(haddr);
tcg_debug_assert((mop & MO_SIZE) == MO_64);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_8(env, ra, haddr, mop);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
if (mop & MO_BSWAP) {
ret = bswap64(ret);
}
return ret;
}
uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint16_t ret;
validate_memop(oi, MO_LEUW);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = lduw_le_p(haddr);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
return do_ld8_mmu(env, addr, get_memop(oi), ra);
}
uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint32_t ret;
validate_memop(oi, MO_LEUL);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldl_le_p(haddr);
clear_helper_retaddr();
uint64_t ret = do_ld8_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint64_t ret;
validate_memop(oi, MO_LEUQ);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldq_le_p(haddr);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
MemOp mop, uintptr_t ra)
{
void *haddr;
Int128 ret;
validate_memop(oi, MO_128 | MO_BE);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
memcpy(&ret, haddr, 16);
tcg_debug_assert((mop & MO_SIZE) == MO_128);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_16(env, ra, haddr, mop);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
if (!HOST_BIG_ENDIAN) {
if (mop & MO_BSWAP) {
ret = bswap128(ret);
}
return ret;
}
Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
Int128 ret;
validate_memop(oi, MO_128 | MO_LE);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
memcpy(&ret, haddr, 16);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
if (HOST_BIG_ENDIAN) {
ret = bswap128(ret);
return do_ld16_mmu(env, addr, get_memop(oi), ra);
}
Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, MemOpIdx oi)
{
return helper_ld16_mmu(env, addr, oi, GETPC());
}
Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
Int128 ret = do_ld16_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
MemOp mop, uintptr_t ra)
{
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_8);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
stb_p(haddr, val);
clear_helper_retaddr();
}
void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st1_mmu(env, addr, val, get_memop(oi), ra);
}
void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_UB);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stb_p(haddr, val);
clear_helper_retaddr();
do_st1_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
MemOp mop, uintptr_t ra)
{
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_16);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
val = bswap16(val);
}
store_atom_2(env, ra, haddr, mop, val);
clear_helper_retaddr();
}
void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_BEUW);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stw_be_p(haddr, val);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
do_st2_mmu(env, addr, val, get_memop(oi), ra);
}
void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_BEUL);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stl_be_p(haddr, val);
clear_helper_retaddr();
do_st2_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
MemOp mop, uintptr_t ra)
{
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_32);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
val = bswap32(val);
}
store_atom_4(env, ra, haddr, mop, val);
clear_helper_retaddr();
}
void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_BEUQ);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stq_be_p(haddr, val);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
do_st4_mmu(env, addr, val, get_memop(oi), ra);
}
void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_LEUW);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stw_le_p(haddr, val);
clear_helper_retaddr();
do_st4_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
MemOp mop, uintptr_t ra)
{
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_64);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
val = bswap64(val);
}
store_atom_8(env, ra, haddr, mop, val);
clear_helper_retaddr();
}
void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_LEUL);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stl_le_p(haddr, val);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
do_st8_mmu(env, addr, val, get_memop(oi), ra);
}
void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_LEUQ);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stq_le_p(haddr, val);
clear_helper_retaddr();
do_st8_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr,
Int128 val, MemOpIdx oi, uintptr_t ra)
static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
MemOp mop, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_128 | MO_BE);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
if (!HOST_BIG_ENDIAN) {
tcg_debug_assert((mop & MO_SIZE) == MO_128);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
val = bswap128(val);
}
memcpy(haddr, &val, 16);
store_atom_16(env, ra, haddr, mop, val);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr,
void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
MemOpIdx oi, uintptr_t ra)
{
do_st16_mmu(env, addr, val, get_memop(oi), ra);
}
void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
{
helper_st16_mmu(env, addr, val, oi, GETPC());
}
void cpu_st16_mmu(CPUArchState *env, abi_ptr addr,
Int128 val, MemOpIdx oi, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_128 | MO_LE);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
if (HOST_BIG_ENDIAN) {
val = bswap128(val);
}
memcpy(haddr, &val, 16);
clear_helper_retaddr();
do_st16_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
@ -1221,16 +1262,70 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
return ret;
}
uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint8_t ret;
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
ret = ldub_p(haddr);
clear_helper_retaddr();
return ret;
}
uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint16_t ret;
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
ret = lduw_p(haddr);
clear_helper_retaddr();
if (get_memop(oi) & MO_BSWAP) {
ret = bswap16(ret);
}
return ret;
}
uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint32_t ret;
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
ret = ldl_p(haddr);
clear_helper_retaddr();
if (get_memop(oi) & MO_BSWAP) {
ret = bswap32(ret);
}
return ret;
}
uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint64_t ret;
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldq_p(haddr);
clear_helper_retaddr();
if (get_memop(oi) & MO_BSWAP) {
ret = bswap64(ret);
}
return ret;
}
#include "ldst_common.c.inc"
/*
* Do not allow unaligned operations to proceed. Return the host address.
*
* @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
*/
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
MemOpIdx oi, int size, int prot,
uintptr_t retaddr)
MemOpIdx oi, int size, uintptr_t retaddr)
{
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
@ -1238,8 +1333,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
/* Enforce guest required alignment. */
if (unlikely(addr & ((1 << a_bits) - 1))) {
MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, retaddr);
}
/* Enforce qemu required alignment. */
@ -1277,7 +1371,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
#include "atomic_template.h"
#endif
#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

View File

@ -12,6 +12,7 @@
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "hw/xen/xen_native.h"
#include "hw/xen/xen-legacy-backend.h"
#include "hw/xen/xen_pt.h"
#include "chardev/char.h"
@ -23,99 +24,18 @@
#include "migration/global_state.h"
#include "hw/boards.h"
//#define DEBUG_XEN
#ifdef DEBUG_XEN
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
do { } while (0)
#endif
bool xen_allowed;
xc_interface *xen_xc;
xenforeignmemory_handle *xen_fmem;
xendevicemodel_handle *xen_dmod;
static int store_dev_info(int domid, Chardev *cs, const char *string)
{
struct xs_handle *xs = NULL;
char *path = NULL;
char *newpath = NULL;
char *pts = NULL;
int ret = -1;
/* Only continue if we're talking to a pty. */
if (!CHARDEV_IS_PTY(cs)) {
return 0;
}
pts = cs->filename + 4;
/* We now have everything we need to set the xenstore entry. */
xs = xs_open(0);
if (xs == NULL) {
fprintf(stderr, "Could not contact XenStore\n");
goto out;
}
path = xs_get_domain_path(xs, domid);
if (path == NULL) {
fprintf(stderr, "xs_get_domain_path() error\n");
goto out;
}
newpath = realloc(path, (strlen(path) + strlen(string) +
strlen("/tty") + 1));
if (newpath == NULL) {
fprintf(stderr, "realloc error\n");
goto out;
}
path = newpath;
strcat(path, string);
strcat(path, "/tty");
if (!xs_write(xs, XBT_NULL, path, pts, strlen(pts))) {
fprintf(stderr, "xs_write for '%s' fail", string);
goto out;
}
ret = 0;
out:
free(path);
xs_close(xs);
return ret;
}
void xenstore_store_pv_console_info(int i, Chardev *chr)
{
if (i == 0) {
store_dev_info(xen_domid, chr, "/console");
} else {
char buf[32];
snprintf(buf, sizeof(buf), "/device/console/%d", i);
store_dev_info(xen_domid, chr, buf);
}
}
static void xenstore_record_dm_state(struct xs_handle *xs, const char *state)
static void xenstore_record_dm_state(const char *state)
{
char path[50];
if (xs == NULL) {
error_report("xenstore connection not initialized");
exit(1);
}
snprintf(path, sizeof (path), "device-model/%u/state", xen_domid);
/*
* This call may fail when running restricted so don't make it fatal in
* that case. Toolstacks should instead use QMP to listen for state changes.
*/
if (!xs_write(xs, XBT_NULL, path, state, strlen(state)) &&
!xen_domid_restrict) {
if (!qemu_xen_xs_write(xenstore, XBT_NULL, path, state, strlen(state))) {
error_report("error recording dm state");
exit(1);
}
@ -127,7 +47,7 @@ static void xen_change_state_handler(void *opaque, bool running,
{
if (running) {
/* record state running */
xenstore_record_dm_state(xenstore, "running");
xenstore_record_dm_state("running");
}
}
@ -176,11 +96,21 @@ static int xen_init(MachineState *ms)
xc_interface_close(xen_xc);
return -1;
}
/*
* The XenStore write would fail when running restricted so don't attempt
* it in that case. Toolstacks should instead use QMP to listen for state
* changes.
*/
if (!xen_domid_restrict) {
qemu_add_vm_change_state_handler(xen_change_state_handler, NULL);
}
/*
* opt out of system RAM being allocated by generic code
*/
mc->default_ram_id = NULL;
xen_mode = XEN_ATTACH;
return 0;
}

View File

@ -222,11 +222,7 @@ static int alsa_poll_helper (snd_pcm_t *handle, struct pollhlp *hlp, int mask)
return -1;
}
pfds = audio_calloc ("alsa_poll_helper", count, sizeof (*pfds));
if (!pfds) {
dolog ("Could not initialize poll mode\n");
return -1;
}
pfds = g_new0(struct pollfd, count);
err = snd_pcm_poll_descriptors (handle, pfds, count);
if (err < 0) {
@ -917,28 +913,23 @@ static void *alsa_audio_init(Audiodev *dev)
alsa_init_per_direction(aopts->in);
alsa_init_per_direction(aopts->out);
/*
* need to define them, as otherwise alsa produces no sound
* doesn't set has_* so alsa_open can identify it wasn't set by the user
*/
/* don't set has_* so alsa_open can identify it wasn't set by the user */
if (!dev->u.alsa.out->has_period_length) {
/* 1024 frames assuming 44100Hz */
dev->u.alsa.out->period_length = 1024 * 1000000 / 44100;
/* 256 frames assuming 44100Hz */
dev->u.alsa.out->period_length = 5805;
}
if (!dev->u.alsa.out->has_buffer_length) {
/* 4096 frames assuming 44100Hz */
dev->u.alsa.out->buffer_length = 4096ll * 1000000 / 44100;
dev->u.alsa.out->buffer_length = 92880;
}
/*
* OptsVisitor sets unspecified optional fields to zero, but do not depend
* on it...
*/
if (!dev->u.alsa.in->has_period_length) {
dev->u.alsa.in->period_length = 0;
/* 256 frames assuming 44100Hz */
dev->u.alsa.in->period_length = 5805;
}
if (!dev->u.alsa.in->has_buffer_length) {
dev->u.alsa.in->buffer_length = 0;
/* 4096 frames assuming 44100Hz */
dev->u.alsa.in->buffer_length = 92880;
}
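For reference, the new constants are just the old frame counts converted to microseconds at 44100 Hz and rounded: 256 * 1000000 / 44100 = 5804.99 (5805) and 4096 * 1000000 / 44100 = 92879.8 (92880); the input side now gets the same real defaults instead of relying on OptsVisitor zero-filling.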
return dev;

View File

@ -33,6 +33,7 @@
#include "qapi/qapi-visit-audio.h"
#include "qapi/qapi-commands-audio.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/help_option.h"
#include "sysemu/sysemu.h"
@ -148,26 +149,6 @@ static inline int audio_bits_to_index (int bits)
}
}
void *audio_calloc (const char *funcname, int nmemb, size_t size)
{
int cond;
size_t len;
len = nmemb * size;
cond = !nmemb || !size;
cond |= nmemb < 0;
cond |= len < size;
if (audio_bug ("audio_calloc", cond)) {
AUD_log (NULL, "%s passed invalid arguments to audio_calloc\n",
funcname);
AUD_log (NULL, "nmemb=%d size=%zu (len=%zu)\n", nmemb, size, len);
return NULL;
}
return g_malloc0 (len);
}
void AUD_vlog (const char *cap, const char *fmt, va_list ap)
{
if (cap) {
@ -400,13 +381,6 @@ void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len)
/*
* Capture
*/
static void noop_conv (struct st_sample *dst, const void *src, int samples)
{
(void) src;
(void) dst;
(void) samples;
}
static CaptureVoiceOut *audio_pcm_capture_find_specific(AudioState *s,
struct audsettings *as)
{
@ -504,15 +478,8 @@ static int audio_attach_capture (HWVoiceOut *hw)
sw->info = hw->info;
sw->empty = 1;
sw->active = hw->enabled;
sw->conv = noop_conv;
sw->ratio = ((int64_t) hw_cap->info.freq << 32) / sw->info.freq;
sw->vol = nominal_volume;
sw->rate = st_rate_start (sw->info.freq, hw_cap->info.freq);
if (!sw->rate) {
dolog ("Could not start rate conversion for `%s'\n", SW_NAME (sw));
g_free (sw);
return -1;
}
QLIST_INSERT_HEAD (&hw_cap->sw_head, sw, entries);
QLIST_INSERT_HEAD (&hw->cap_head, sc, entries);
#ifdef DEBUG_CAPTURE
@ -547,8 +514,8 @@ static size_t audio_pcm_hw_find_min_in (HWVoiceIn *hw)
static size_t audio_pcm_hw_get_live_in(HWVoiceIn *hw)
{
size_t live = hw->total_samples_captured - audio_pcm_hw_find_min_in (hw);
if (audio_bug(__func__, live > hw->conv_buf->size)) {
dolog("live=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size);
if (audio_bug(__func__, live > hw->conv_buf.size)) {
dolog("live=%zu hw->conv_buf.size=%zu\n", live, hw->conv_buf.size);
return 0;
}
return live;
@ -557,13 +524,13 @@ static size_t audio_pcm_hw_get_live_in(HWVoiceIn *hw)
static size_t audio_pcm_hw_conv_in(HWVoiceIn *hw, void *pcm_buf, size_t samples)
{
size_t conv = 0;
STSampleBuffer *conv_buf = hw->conv_buf;
STSampleBuffer *conv_buf = &hw->conv_buf;
while (samples) {
uint8_t *src = advance(pcm_buf, conv * hw->info.bytes_per_frame);
size_t proc = MIN(samples, conv_buf->size - conv_buf->pos);
hw->conv(conv_buf->samples + conv_buf->pos, src, proc);
hw->conv(conv_buf->buffer + conv_buf->pos, src, proc);
conv_buf->pos = (conv_buf->pos + proc) % conv_buf->size;
samples -= proc;
conv += proc;
@ -575,56 +542,65 @@ static size_t audio_pcm_hw_conv_in(HWVoiceIn *hw, void *pcm_buf, size_t samples)
/*
* Soft voice (capture)
*/
static size_t audio_pcm_sw_read(SWVoiceIn *sw, void *buf, size_t size)
static void audio_pcm_sw_resample_in(SWVoiceIn *sw,
size_t frames_in_max, size_t frames_out_max,
size_t *total_in, size_t *total_out)
{
HWVoiceIn *hw = sw->hw;
size_t samples, live, ret = 0, swlim, isamp, osamp, rpos, total = 0;
struct st_sample *src, *dst = sw->buf;
struct st_sample *src, *dst;
size_t live, rpos, frames_in, frames_out;
live = hw->total_samples_captured - sw->total_hw_samples_acquired;
rpos = audio_ring_posb(hw->conv_buf.pos, live, hw->conv_buf.size);
/* resample conv_buf from rpos to end of buffer */
src = hw->conv_buf.buffer + rpos;
frames_in = MIN(frames_in_max, hw->conv_buf.size - rpos);
dst = sw->resample_buf.buffer;
frames_out = frames_out_max;
st_rate_flow(sw->rate, src, dst, &frames_in, &frames_out);
rpos += frames_in;
*total_in = frames_in;
*total_out = frames_out;
/* resample conv_buf from start of buffer if there are input frames left */
if (frames_in_max - frames_in && rpos == hw->conv_buf.size) {
src = hw->conv_buf.buffer;
frames_in = frames_in_max - frames_in;
dst += frames_out;
frames_out = frames_out_max - frames_out;
st_rate_flow(sw->rate, src, dst, &frames_in, &frames_out);
*total_in += frames_in;
*total_out += frames_out;
}
}
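The split traversal above (one st_rate_flow pass from rpos to the end of conv_buf, then a second from index 0) is the usual two-segment ring-buffer read. A standalone toy version with the resampler replaced by memcpy; ring_read is a hypothetical name, not a QEMU function:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Read n elements starting at rpos from a ring of size len,
 * splitting into at most two contiguous copies. */
static size_t ring_read(const int *ring, size_t len, size_t rpos,
                        int *dst, size_t n)
{
    size_t first = n < len - rpos ? n : len - rpos; /* tail segment */

    memcpy(dst, ring + rpos, first * sizeof(*dst));
    if (n > first) {                                /* wrapped segment */
        memcpy(dst + first, ring, (n - first) * sizeof(*dst));
    }
    return n;
}

int main(void)
{
    int ring[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    int out[5];

    ring_read(ring, 8, 6, out, 5);   /* reads 6,7 then wraps to 0,1,2 */
    for (size_t i = 0; i < 5; i++) {
        printf("%d ", out[i]);       /* prints: 6 7 0 1 2 */
    }
    printf("\n");
    return 0;
}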
static size_t audio_pcm_sw_read(SWVoiceIn *sw, void *buf, size_t buf_len)
{
HWVoiceIn *hw = sw->hw;
size_t live, frames_out_max, total_in, total_out;
live = hw->total_samples_captured - sw->total_hw_samples_acquired;
if (!live) {
return 0;
}
if (audio_bug(__func__, live > hw->conv_buf->size)) {
dolog("live_in=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size);
if (audio_bug(__func__, live > hw->conv_buf.size)) {
dolog("live_in=%zu hw->conv_buf.size=%zu\n", live, hw->conv_buf.size);
return 0;
}
rpos = audio_ring_posb(hw->conv_buf->pos, live, hw->conv_buf->size);
frames_out_max = MIN(buf_len / sw->info.bytes_per_frame,
sw->resample_buf.size);
samples = size / sw->info.bytes_per_frame;
swlim = (live * sw->ratio) >> 32;
swlim = MIN (swlim, samples);
while (swlim) {
src = hw->conv_buf->samples + rpos;
if (hw->conv_buf->pos > rpos) {
isamp = hw->conv_buf->pos - rpos;
} else {
isamp = hw->conv_buf->size - rpos;
}
if (!isamp) {
break;
}
osamp = swlim;
st_rate_flow (sw->rate, src, dst, &isamp, &osamp);
swlim -= osamp;
rpos = (rpos + isamp) % hw->conv_buf->size;
dst += osamp;
ret += osamp;
total += isamp;
}
audio_pcm_sw_resample_in(sw, live, frames_out_max, &total_in, &total_out);
if (!hw->pcm_ops->volume_in) {
mixeng_volume (sw->buf, ret, &sw->vol);
mixeng_volume(sw->resample_buf.buffer, total_out, &sw->vol);
}
sw->clip(buf, sw->resample_buf.buffer, total_out);
sw->clip (buf, sw->buf, ret);
sw->total_hw_samples_acquired += total;
return ret * sw->info.bytes_per_frame;
sw->total_hw_samples_acquired += total_in;
return total_out * sw->info.bytes_per_frame;
}
/*
@ -660,8 +636,8 @@ static size_t audio_pcm_hw_get_live_out (HWVoiceOut *hw, int *nb_live)
if (nb_live1) {
size_t live = smin;
if (audio_bug(__func__, live > hw->mix_buf->size)) {
dolog("live=%zu hw->mix_buf->size=%zu\n", live, hw->mix_buf->size);
if (audio_bug(__func__, live > hw->mix_buf.size)) {
dolog("live=%zu hw->mix_buf.size=%zu\n", live, hw->mix_buf.size);
return 0;
}
return live;
@ -678,17 +654,17 @@ static size_t audio_pcm_hw_get_free(HWVoiceOut *hw)
static void audio_pcm_hw_clip_out(HWVoiceOut *hw, void *pcm_buf, size_t len)
{
size_t clipped = 0;
size_t pos = hw->mix_buf->pos;
size_t pos = hw->mix_buf.pos;
while (len) {
st_sample *src = hw->mix_buf->samples + pos;
st_sample *src = hw->mix_buf.buffer + pos;
uint8_t *dst = advance(pcm_buf, clipped * hw->info.bytes_per_frame);
size_t samples_till_end_of_buf = hw->mix_buf->size - pos;
size_t samples_till_end_of_buf = hw->mix_buf.size - pos;
size_t samples_to_clip = MIN(len, samples_till_end_of_buf);
hw->clip(dst, src, samples_to_clip);
pos = (pos + samples_to_clip) % hw->mix_buf->size;
pos = (pos + samples_to_clip) % hw->mix_buf.size;
len -= samples_to_clip;
clipped += samples_to_clip;
}
@ -697,84 +673,113 @@ static void audio_pcm_hw_clip_out(HWVoiceOut *hw, void *pcm_buf, size_t len)
/*
* Soft voice (playback)
*/
static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t size)
static void audio_pcm_sw_resample_out(SWVoiceOut *sw,
size_t frames_in_max, size_t frames_out_max,
size_t *total_in, size_t *total_out)
{
size_t hwsamples, samples, isamp, osamp, wpos, live, dead, left, blck;
size_t hw_free;
size_t ret = 0, pos = 0, total = 0;
if (!sw) {
return size;
}
hwsamples = sw->hw->mix_buf->size;
HWVoiceOut *hw = sw->hw;
struct st_sample *src, *dst;
size_t live, wpos, frames_in, frames_out;
live = sw->total_hw_samples_mixed;
if (audio_bug(__func__, live > hwsamples)) {
dolog("live=%zu hw->mix_buf->size=%zu\n", live, hwsamples);
wpos = (hw->mix_buf.pos + live) % hw->mix_buf.size;
/* write to mix_buf from wpos to end of buffer */
src = sw->resample_buf.buffer;
frames_in = frames_in_max;
dst = hw->mix_buf.buffer + wpos;
frames_out = MIN(frames_out_max, hw->mix_buf.size - wpos);
st_rate_flow_mix(sw->rate, src, dst, &frames_in, &frames_out);
wpos += frames_out;
*total_in = frames_in;
*total_out = frames_out;
/* write to mix_buf from start of buffer if there are input frames left */
if (frames_in_max - frames_in > 0 && wpos == hw->mix_buf.size) {
src += frames_in;
frames_in = frames_in_max - frames_in;
dst = hw->mix_buf.buffer;
frames_out = frames_out_max - frames_out;
st_rate_flow_mix(sw->rate, src, dst, &frames_in, &frames_out);
*total_in += frames_in;
*total_out += frames_out;
}
}
static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t buf_len)
{
HWVoiceOut *hw = sw->hw;
size_t live, dead, hw_free, sw_max, fe_max;
size_t frames_in_max, frames_out_max, total_in, total_out;
live = sw->total_hw_samples_mixed;
if (audio_bug(__func__, live > hw->mix_buf.size)) {
dolog("live=%zu hw->mix_buf.size=%zu\n", live, hw->mix_buf.size);
return 0;
}
if (live == hwsamples) {
if (live == hw->mix_buf.size) {
#ifdef DEBUG_OUT
dolog ("%s is full %zu\n", sw->name, live);
#endif
return 0;
}
wpos = (sw->hw->mix_buf->pos + live) % hwsamples;
dead = hwsamples - live;
hw_free = audio_pcm_hw_get_free(sw->hw);
dead = hw->mix_buf.size - live;
hw_free = audio_pcm_hw_get_free(hw);
hw_free = hw_free > live ? hw_free - live : 0;
samples = ((int64_t)MIN(dead, hw_free) << 32) / sw->ratio;
samples = MIN(samples, size / sw->info.bytes_per_frame);
if (samples) {
sw->conv(sw->buf, buf, samples);
frames_out_max = MIN(dead, hw_free);
sw_max = st_rate_frames_in(sw->rate, frames_out_max);
fe_max = MIN(buf_len / sw->info.bytes_per_frame + sw->resample_buf.pos,
sw->resample_buf.size);
frames_in_max = MIN(sw_max, fe_max);
if (!frames_in_max) {
return 0;
}
if (frames_in_max > sw->resample_buf.pos) {
sw->conv(sw->resample_buf.buffer + sw->resample_buf.pos,
buf, frames_in_max - sw->resample_buf.pos);
if (!sw->hw->pcm_ops->volume_out) {
mixeng_volume(sw->buf, samples, &sw->vol);
mixeng_volume(sw->resample_buf.buffer + sw->resample_buf.pos,
frames_in_max - sw->resample_buf.pos, &sw->vol);
}
}
while (samples) {
dead = hwsamples - live;
left = hwsamples - wpos;
blck = MIN (dead, left);
if (!blck) {
break;
}
isamp = samples;
osamp = blck;
st_rate_flow_mix (
sw->rate,
sw->buf + pos,
sw->hw->mix_buf->samples + wpos,
&isamp,
&osamp
);
ret += isamp;
samples -= isamp;
pos += isamp;
live += osamp;
wpos = (wpos + osamp) % hwsamples;
total += osamp;
}
audio_pcm_sw_resample_out(sw, frames_in_max, frames_out_max,
&total_in, &total_out);
sw->total_hw_samples_mixed += total;
sw->total_hw_samples_mixed += total_out;
sw->empty = sw->total_hw_samples_mixed == 0;
/*
* Upsampling may leave one audio frame in the resample buffer. Decrement
* total_in by one if there was a leftover frame from the previous resample
* pass in the resample buffer. Increment total_in by one if the current
* resample pass left one frame in the resample buffer.
*/
if (frames_in_max - total_in == 1) {
/* copy one leftover audio frame to the beginning of the buffer */
*sw->resample_buf.buffer = *(sw->resample_buf.buffer + total_in);
total_in += 1 - sw->resample_buf.pos;
sw->resample_buf.pos = 1;
} else if (total_in >= sw->resample_buf.pos) {
total_in -= sw->resample_buf.pos;
sw->resample_buf.pos = 0;
}
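Concretely, with hypothetical numbers: if frames_in_max is 10 and the resampler consumed total_in = 9, the unconsumed tenth frame is copied to index 0 and resample_buf.pos is set to 1 so the next pass starts with it; total_in then becomes 10 if there was no previous leftover (pos was 0), or stays 9 if the parked frame merely replaces one carried over from the last pass (pos was 1).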
#ifdef DEBUG_OUT
dolog (
"%s: write size %zu ret %zu total sw %zu\n",
"%s: write size %zu written %zu total mixed %zu\n",
SW_NAME(sw),
size / sw->info.bytes_per_frame,
ret,
buf_len / sw->info.bytes_per_frame,
total_in,
sw->total_hw_samples_mixed
);
#endif
return ret * sw->info.bytes_per_frame;
return total_in * sw->info.bytes_per_frame;
}
#ifdef DEBUG_AUDIO
@ -992,18 +997,6 @@ void AUD_set_active_in (SWVoiceIn *sw, int on)
}
}
/**
* audio_frontend_frames_in() - returns the number of frames the resampling
* code generates from frames_in frames
*
* @sw: audio recording frontend
* @frames_in: number of frames
*/
static size_t audio_frontend_frames_in(SWVoiceIn *sw, size_t frames_in)
{
return (int64_t)frames_in * sw->ratio >> 32;
}
static size_t audio_get_avail (SWVoiceIn *sw)
{
size_t live;
@ -1013,33 +1006,21 @@ static size_t audio_get_avail (SWVoiceIn *sw)
}
live = sw->hw->total_samples_captured - sw->total_hw_samples_acquired;
if (audio_bug(__func__, live > sw->hw->conv_buf->size)) {
dolog("live=%zu sw->hw->conv_buf->size=%zu\n", live,
sw->hw->conv_buf->size);
if (audio_bug(__func__, live > sw->hw->conv_buf.size)) {
dolog("live=%zu sw->hw->conv_buf.size=%zu\n", live,
sw->hw->conv_buf.size);
return 0;
}
ldebug (
"%s: get_avail live %zu frontend frames %zu\n",
"%s: get_avail live %zu frontend frames %u\n",
SW_NAME (sw),
live, audio_frontend_frames_in(sw, live)
live, st_rate_frames_out(sw->rate, live)
);
return live;
}
/**
* audio_frontend_frames_out() - returns the number of frames needed to
* get frames_out frames after resampling
*
* @sw: audio playback frontend
* @frames_out: number of frames
*/
static size_t audio_frontend_frames_out(SWVoiceOut *sw, size_t frames_out)
{
return ((int64_t)frames_out << 32) / sw->ratio;
}
static size_t audio_get_free(SWVoiceOut *sw)
{
size_t live, dead;
@ -1050,17 +1031,17 @@ static size_t audio_get_free(SWVoiceOut *sw)
live = sw->total_hw_samples_mixed;
if (audio_bug(__func__, live > sw->hw->mix_buf->size)) {
dolog("live=%zu sw->hw->mix_buf->size=%zu\n", live,
sw->hw->mix_buf->size);
if (audio_bug(__func__, live > sw->hw->mix_buf.size)) {
dolog("live=%zu sw->hw->mix_buf.size=%zu\n", live,
sw->hw->mix_buf.size);
return 0;
}
dead = sw->hw->mix_buf->size - live;
dead = sw->hw->mix_buf.size - live;
#ifdef DEBUG_OUT
dolog("%s: get_free live %zu dead %zu frontend frames %zu\n",
SW_NAME(sw), live, dead, audio_frontend_frames_out(sw, dead));
dolog("%s: get_free live %zu dead %zu frontend frames %u\n",
SW_NAME(sw), live, dead, st_rate_frames_in(sw->rate, dead));
#endif
return dead;
@ -1076,32 +1057,40 @@ static void audio_capture_mix_and_clear(HWVoiceOut *hw, size_t rpos,
for (sc = hw->cap_head.lh_first; sc; sc = sc->entries.le_next) {
SWVoiceOut *sw = &sc->sw;
int rpos2 = rpos;
size_t rpos2 = rpos;
n = samples;
while (n) {
size_t till_end_of_hw = hw->mix_buf->size - rpos2;
size_t to_write = MIN(till_end_of_hw, n);
size_t bytes = to_write * hw->info.bytes_per_frame;
size_t written;
size_t till_end_of_hw = hw->mix_buf.size - rpos2;
size_t to_read = MIN(till_end_of_hw, n);
size_t live, frames_in, frames_out;
sw->buf = hw->mix_buf->samples + rpos2;
written = audio_pcm_sw_write (sw, NULL, bytes);
if (written - bytes) {
dolog("Could not mix %zu bytes into a capture "
sw->resample_buf.buffer = hw->mix_buf.buffer + rpos2;
sw->resample_buf.size = to_read;
live = sw->total_hw_samples_mixed;
audio_pcm_sw_resample_out(sw,
to_read, sw->hw->mix_buf.size - live,
&frames_in, &frames_out);
sw->total_hw_samples_mixed += frames_out;
sw->empty = sw->total_hw_samples_mixed == 0;
if (to_read - frames_in) {
dolog("Could not mix %zu frames into a capture "
"buffer, mixed %zu\n",
bytes, written);
to_read, frames_in);
break;
}
n -= to_write;
rpos2 = (rpos2 + to_write) % hw->mix_buf->size;
n -= to_read;
rpos2 = (rpos2 + to_read) % hw->mix_buf.size;
}
}
}
n = MIN(samples, hw->mix_buf->size - rpos);
mixeng_clear(hw->mix_buf->samples + rpos, n);
mixeng_clear(hw->mix_buf->samples, samples - n);
n = MIN(samples, hw->mix_buf.size - rpos);
mixeng_clear(hw->mix_buf.buffer + rpos, n);
mixeng_clear(hw->mix_buf.buffer, samples - n);
}
static size_t audio_pcm_hw_run_out(HWVoiceOut *hw, size_t live)
@ -1127,7 +1116,7 @@ static size_t audio_pcm_hw_run_out(HWVoiceOut *hw, size_t live)
live -= proc;
clipped += proc;
hw->mix_buf->pos = (hw->mix_buf->pos + proc) % hw->mix_buf->size;
hw->mix_buf.pos = (hw->mix_buf.pos + proc) % hw->mix_buf.size;
if (proc == 0 || proc < decr) {
break;
@ -1181,12 +1170,14 @@ static void audio_run_out (AudioState *s)
size_t free;
if (hw_free > sw->total_hw_samples_mixed) {
free = audio_frontend_frames_out(sw,
free = st_rate_frames_in(sw->rate,
MIN(sw_free, hw_free - sw->total_hw_samples_mixed));
} else {
free = 0;
}
if (free > 0) {
if (free > sw->resample_buf.pos) {
free = MIN(free, sw->resample_buf.size)
- sw->resample_buf.pos;
sw->callback.fn(sw->callback.opaque,
free * sw->info.bytes_per_frame);
}
@ -1198,8 +1189,8 @@ static void audio_run_out (AudioState *s)
live = 0;
}
if (audio_bug(__func__, live > hw->mix_buf->size)) {
dolog("live=%zu hw->mix_buf->size=%zu\n", live, hw->mix_buf->size);
if (audio_bug(__func__, live > hw->mix_buf.size)) {
dolog("live=%zu hw->mix_buf.size=%zu\n", live, hw->mix_buf.size);
continue;
}
@ -1227,13 +1218,13 @@ static void audio_run_out (AudioState *s)
continue;
}
prev_rpos = hw->mix_buf->pos;
prev_rpos = hw->mix_buf.pos;
played = audio_pcm_hw_run_out(hw, live);
replay_audio_out(&played);
if (audio_bug(__func__, hw->mix_buf->pos >= hw->mix_buf->size)) {
dolog("hw->mix_buf->pos=%zu hw->mix_buf->size=%zu played=%zu\n",
hw->mix_buf->pos, hw->mix_buf->size, played);
hw->mix_buf->pos = 0;
if (audio_bug(__func__, hw->mix_buf.pos >= hw->mix_buf.size)) {
dolog("hw->mix_buf.pos=%zu hw->mix_buf.size=%zu played=%zu\n",
hw->mix_buf.pos, hw->mix_buf.size, played);
hw->mix_buf.pos = 0;
}
#ifdef DEBUG_OUT
@ -1314,10 +1305,10 @@ static void audio_run_in (AudioState *s)
if (replay_mode != REPLAY_MODE_PLAY) {
captured = audio_pcm_hw_run_in(
hw, hw->conv_buf->size - audio_pcm_hw_get_live_in(hw));
hw, hw->conv_buf.size - audio_pcm_hw_get_live_in(hw));
}
replay_audio_in(&captured, hw->conv_buf->samples, &hw->conv_buf->pos,
hw->conv_buf->size);
replay_audio_in(&captured, hw->conv_buf.buffer, &hw->conv_buf.pos,
hw->conv_buf.size);
min = audio_pcm_hw_find_min_in (hw);
hw->total_samples_captured += captured - min;
@ -1330,8 +1321,9 @@ static void audio_run_in (AudioState *s)
size_t sw_avail = audio_get_avail(sw);
size_t avail;
avail = audio_frontend_frames_in(sw, sw_avail);
avail = st_rate_frames_out(sw->rate, sw_avail);
if (avail > 0) {
avail = MIN(avail, sw->resample_buf.size);
sw->callback.fn(sw->callback.opaque,
avail * sw->info.bytes_per_frame);
}
@ -1350,14 +1342,14 @@ static void audio_run_capture (AudioState *s)
SWVoiceOut *sw;
captured = live = audio_pcm_hw_get_live_out (hw, NULL);
rpos = hw->mix_buf->pos;
rpos = hw->mix_buf.pos;
while (live) {
size_t left = hw->mix_buf->size - rpos;
size_t left = hw->mix_buf.size - rpos;
size_t to_capture = MIN(live, left);
struct st_sample *src;
struct capture_callback *cb;
src = hw->mix_buf->samples + rpos;
src = hw->mix_buf.buffer + rpos;
hw->clip (cap->buf, src, to_capture);
mixeng_clear (src, to_capture);
@ -1365,10 +1357,10 @@ static void audio_run_capture (AudioState *s)
cb->ops.capture (cb->opaque, cap->buf,
to_capture * hw->info.bytes_per_frame);
}
rpos = (rpos + to_capture) % hw->mix_buf->size;
rpos = (rpos + to_capture) % hw->mix_buf.size;
live -= to_capture;
}
hw->mix_buf->pos = rpos;
hw->mix_buf.pos = rpos;
for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) {
if (!sw->active && sw->empty) {
@ -1927,7 +1919,7 @@ CaptureVoiceOut *AUD_add_capture(
audio_pcm_init_info (&hw->info, as);
cap->buf = g_malloc0_n(hw->mix_buf->size, hw->info.bytes_per_frame);
cap->buf = g_malloc0_n(hw->mix_buf.size, hw->info.bytes_per_frame);
if (hw->info.is_float) {
hw->clip = mixeng_clip_float[hw->info.nchannels == 2];
@ -1979,7 +1971,7 @@ void AUD_del_capture (CaptureVoiceOut *cap, void *cb_opaque)
sw = sw1;
}
QLIST_REMOVE (cap, entries);
g_free (cap->hw.mix_buf);
g_free(cap->hw.mix_buf.buffer);
g_free (cap->buf);
g_free (cap);
}
@ -2069,6 +2061,9 @@ void audio_create_pdos(Audiodev *dev)
#ifdef CONFIG_AUDIO_PA
CASE(PA, pa, Pa);
#endif
#ifdef CONFIG_AUDIO_PIPEWIRE
CASE(PIPEWIRE, pipewire, Pipewire);
#endif
#ifdef CONFIG_AUDIO_SDL
CASE(SDL, sdl, Sdl);
#endif

View File

@ -58,7 +58,7 @@ typedef struct SWVoiceCap SWVoiceCap;
typedef struct STSampleBuffer {
size_t pos, size;
st_sample samples[];
st_sample *buffer;
} STSampleBuffer;
typedef struct HWVoiceOut {
@ -71,7 +71,7 @@ typedef struct HWVoiceOut {
f_sample *clip;
uint64_t ts_helper;
STSampleBuffer *mix_buf;
STSampleBuffer mix_buf;
void *buf_emul;
size_t pos_emul, pending_emul, size_emul;
@ -93,7 +93,7 @@ typedef struct HWVoiceIn {
size_t total_samples_captured;
uint64_t ts_helper;
STSampleBuffer *conv_buf;
STSampleBuffer conv_buf;
void *buf_emul;
size_t pos_emul, pending_emul, size_emul;
@ -108,8 +108,7 @@ struct SWVoiceOut {
AudioState *s;
struct audio_pcm_info info;
t_sample *conv;
int64_t ratio;
struct st_sample *buf;
STSampleBuffer resample_buf;
void *rate;
size_t total_hw_samples_mixed;
int active;
@ -126,10 +125,9 @@ struct SWVoiceIn {
AudioState *s;
int active;
struct audio_pcm_info info;
int64_t ratio;
void *rate;
size_t total_hw_samples_acquired;
struct st_sample *buf;
STSampleBuffer resample_buf;
f_sample *clip;
HWVoiceIn *hw;
char *name;
@ -145,14 +143,14 @@ struct audio_driver {
void *(*init) (Audiodev *);
void (*fini) (void *);
#ifdef CONFIG_GIO
void (*set_dbus_server) (AudioState *s, GDBusObjectManagerServer *manager);
void (*set_dbus_server) (AudioState *s, GDBusObjectManagerServer *manager, bool p2p);
#endif
struct audio_pcm_ops *pcm_ops;
int can_be_default;
int max_voices_out;
int max_voices_in;
int voice_size_out;
int voice_size_in;
size_t voice_size_out;
size_t voice_size_in;
QLIST_ENTRY(audio_driver) next;
};
@ -251,7 +249,6 @@ void audio_pcm_init_info (struct audio_pcm_info *info, struct audsettings *as);
void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len);
int audio_bug (const char *funcname, int cond);
void *audio_calloc (const char *funcname, int nmemb, size_t size);
void audio_run(AudioState *s, const char *msg);
@ -294,9 +291,6 @@ static inline size_t audio_ring_posb(size_t pos, size_t dist, size_t len)
#define ldebug(fmt, ...) (void)0
#endif
#define AUDIO_STRINGIFY_(n) #n
#define AUDIO_STRINGIFY(n) AUDIO_STRINGIFY_(n)
typedef struct AudiodevListEntry {
Audiodev *dev;
QSIMPLEQ_ENTRY(AudiodevListEntry) next;

View File

@ -40,7 +40,7 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s,
struct audio_driver *drv)
{
int max_voices = glue (drv->max_voices_, TYPE);
int voice_size = glue (drv->voice_size_, TYPE);
size_t voice_size = glue(drv->voice_size_, TYPE);
if (glue (s->nb_hw_voices_, TYPE) > max_voices) {
if (!max_voices) {
@ -63,7 +63,7 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s,
}
if (audio_bug(__func__, voice_size && !max_voices)) {
dolog ("drv=`%s' voice_size=%d max_voices=0\n",
dolog("drv=`%s' voice_size=%zu max_voices=0\n",
drv->name, voice_size);
}
}
@ -71,8 +71,9 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s,
static void glue (audio_pcm_hw_free_resources_, TYPE) (HW *hw)
{
g_free(hw->buf_emul);
g_free (HWBUF);
HWBUF = NULL;
g_free(HWBUF.buffer);
HWBUF.buffer = NULL;
HWBUF.size = 0;
}
static void glue(audio_pcm_hw_alloc_resources_, TYPE)(HW *hw)
@ -83,56 +84,67 @@ static void glue(audio_pcm_hw_alloc_resources_, TYPE)(HW *hw)
dolog("Attempted to allocate empty buffer\n");
}
HWBUF = g_malloc0(sizeof(STSampleBuffer) + sizeof(st_sample) * samples);
HWBUF->size = samples;
HWBUF.buffer = g_new0(st_sample, samples);
HWBUF.size = samples;
HWBUF.pos = 0;
} else {
HWBUF = NULL;
HWBUF.buffer = NULL;
HWBUF.size = 0;
}
}
static void glue (audio_pcm_sw_free_resources_, TYPE) (SW *sw)
{
g_free (sw->buf);
g_free(sw->resample_buf.buffer);
sw->resample_buf.buffer = NULL;
sw->resample_buf.size = 0;
if (sw->rate) {
st_rate_stop (sw->rate);
}
sw->buf = NULL;
sw->rate = NULL;
}
static int glue (audio_pcm_sw_alloc_resources_, TYPE) (SW *sw)
{
int samples;
HW *hw = sw->hw;
uint64_t samples;
if (!glue(audio_get_pdo_, TYPE)(sw->s->dev)->mixing_engine) {
return 0;
}
#ifdef DAC
samples = ((int64_t) sw->HWBUF->size << 32) / sw->ratio;
#else
samples = (int64_t)sw->HWBUF->size * sw->ratio >> 32;
#endif
samples = muldiv64(HWBUF.size, sw->info.freq, hw->info.freq);
if (samples == 0) {
uint64_t f_fe_min;
uint64_t f_be = (uint32_t)hw->info.freq;
sw->buf = audio_calloc(__func__, samples, sizeof(struct st_sample));
if (!sw->buf) {
dolog ("Could not allocate buffer for `%s' (%d samples)\n",
SW_NAME (sw), samples);
/* f_fe_min = ceil(1 [frames] * f_be [Hz] / size_be [frames]) */
f_fe_min = (f_be + HWBUF.size - 1) / HWBUF.size;
qemu_log_mask(LOG_UNIMP,
AUDIO_CAP ": The guest selected a " NAME " sample rate"
" of %d Hz for %s. Only sample rates >= %" PRIu64 " Hz"
" are supported.\n",
sw->info.freq, sw->name, f_fe_min);
return -1;
}
/*
* Allocate one additional audio frame that is needed for upsampling
* if the resample buffer size is small. For large buffer sizes take
* care of overflows and truncation.
*/
samples = samples < SIZE_MAX ? samples + 1 : SIZE_MAX;
sw->resample_buf.buffer = g_new0(st_sample, samples);
sw->resample_buf.size = samples;
sw->resample_buf.pos = 0;
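Worked sizing example with hypothetical rates: a 22050 Hz frontend over a 44100 Hz backend with a 1024-frame HWBUF gets muldiv64(1024, 22050, 44100) = 512 frames, plus the one extra frame reserved above for the upsampling carry-over. A frontend below f_fe_min = ceil(44100 / 1024) = 44 Hz would make muldiv64 round to zero, which is exactly what the LOG_UNIMP branch rejects.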
#ifdef DAC
sw->rate = st_rate_start (sw->info.freq, sw->hw->info.freq);
sw->rate = st_rate_start(sw->info.freq, hw->info.freq);
#else
sw->rate = st_rate_start (sw->hw->info.freq, sw->info.freq);
sw->rate = st_rate_start(hw->info.freq, sw->info.freq);
#endif
if (!sw->rate) {
g_free (sw->buf);
sw->buf = NULL;
return -1;
}
return 0;
}
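To make the sizing above concrete, a worked sketch with assumed example rates (not taken from this patch): a backend buffer of 512 frames at 48000 Hz feeding a 44100 Hz guest voice.
uint64_t hw_size = 512;                     /* HWBUF.size (assumed) */
uint32_t fe_freq = 44100, be_freq = 48000;  /* assumed example rates */
uint64_t samples = hw_size * fe_freq / be_freq;         /* muldiv64: 470 */
samples = samples < SIZE_MAX ? samples + 1 : SIZE_MAX;  /* guard frame: 471 */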
@ -149,11 +161,8 @@ static int glue (audio_pcm_sw_init_, TYPE) (
sw->hw = hw;
sw->active = 0;
#ifdef DAC
sw->ratio = ((int64_t) sw->hw->info.freq << 32) / sw->info.freq;
sw->total_hw_samples_mixed = 0;
sw->empty = 1;
#else
sw->ratio = ((int64_t) sw->info.freq << 32) / sw->hw->info.freq;
#endif
if (sw->info.is_float) {
@ -264,13 +273,11 @@ static HW *glue(audio_pcm_hw_add_new_, TYPE)(AudioState *s,
return NULL;
}
hw = audio_calloc(__func__, 1, glue(drv->voice_size_, TYPE));
if (!hw) {
dolog ("Can not allocate voice `%s' size %d\n",
drv->name, glue (drv->voice_size_, TYPE));
return NULL;
}
/*
* Since glue(s->nb_hw_voices_, TYPE) is != 0, glue(drv->voice_size_, TYPE)
* is guaranteed to be != 0. See the audio_init_nb_voices_* functions.
*/
hw = g_malloc0(glue(drv->voice_size_, TYPE));
hw->s = s;
hw->pcm_ops = drv->pcm_ops;
@ -355,6 +362,10 @@ AudiodevPerDirectionOptions *glue(audio_get_pdo_, TYPE)(Audiodev *dev)
case AUDIODEV_DRIVER_PA:
return qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.TYPE);
#endif
#ifdef CONFIG_AUDIO_PIPEWIRE
case AUDIODEV_DRIVER_PIPEWIRE:
return qapi_AudiodevPipewirePerDirectionOptions_base(dev->u.pipewire.TYPE);
#endif
#ifdef CONFIG_AUDIO_SDL
case AUDIODEV_DRIVER_SDL:
return qapi_AudiodevSdlPerDirectionOptions_base(dev->u.sdl.TYPE);
@ -418,33 +429,28 @@ static SW *glue(audio_pcm_create_voice_pair_, TYPE)(
hw_as = *as;
}
sw = audio_calloc(__func__, 1, sizeof(*sw));
if (!sw) {
dolog ("Could not allocate soft voice `%s' (%zu bytes)\n",
sw_name ? sw_name : "unknown", sizeof (*sw));
goto err1;
}
sw = g_new0(SW, 1);
sw->s = s;
hw = glue(audio_pcm_hw_add_, TYPE)(s, &hw_as);
if (!hw) {
goto err2;
dolog("Could not create a backend for voice `%s'\n", sw_name);
goto err1;
}
glue (audio_pcm_hw_add_sw_, TYPE) (hw, sw);
if (glue (audio_pcm_sw_init_, TYPE) (sw, hw, sw_name, as)) {
goto err3;
goto err2;
}
return sw;
err3:
err2:
glue (audio_pcm_hw_del_sw_, TYPE) (sw);
glue (audio_pcm_hw_gc_, TYPE) (&hw);
err2:
g_free (sw);
err1:
g_free(sw);
return NULL;
}
@ -515,7 +521,7 @@ SW *glue (AUD_open_, TYPE) (
HW *hw = sw->hw;
if (!hw) {
dolog ("Internal logic error voice `%s' has no hardware store\n",
dolog("Internal logic error: voice `%s' has no backend\n",
SW_NAME(sw));
goto fail;
}
@ -527,7 +533,6 @@ SW *glue (AUD_open_, TYPE) (
} else {
sw = glue(audio_pcm_create_voice_pair_, TYPE)(s, name, as);
if (!sw) {
dolog ("Failed to create voice `%s'\n", name);
return NULL;
}
}

View File

@ -43,6 +43,7 @@
typedef struct DBusAudio {
GDBusObjectManagerServer *server;
bool p2p;
GDBusObjectSkeleton *audio;
QemuDBusDisplay1Audio *iface;
GHashTable *out_listeners;
@ -448,7 +449,8 @@ dbus_audio_register_listener(AudioState *s,
bool out)
{
DBusAudio *da = s->drv_opaque;
const char *sender = g_dbus_method_invocation_get_sender(invocation);
const char *sender =
da->p2p ? "p2p" : g_dbus_method_invocation_get_sender(invocation);
g_autoptr(GDBusConnection) listener_conn = NULL;
g_autoptr(GError) err = NULL;
g_autoptr(GSocket) socket = NULL;
@ -591,7 +593,7 @@ dbus_audio_register_in_listener(AudioState *s,
}
static void
dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server)
dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server, bool p2p)
{
DBusAudio *da = s->drv_opaque;
@ -599,6 +601,7 @@ dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server)
g_assert(!da->server);
da->server = g_object_ref(server);
da->p2p = p2p;
da->audio = g_dbus_object_skeleton_new(DBUS_DISPLAY1_AUDIO_PATH);
da->iface = qemu_dbus_display1_audio_skeleton_new();

View File

@ -19,6 +19,7 @@ foreach m : [
['sdl', sdl, files('sdlaudio.c')],
['jack', jack, files('jackaudio.c')],
['sndio', sndio, files('sndioaudio.c')],
['pipewire', pipewire, files('pwaudio.c')],
['spice', spice, files('spiceaudio.c')]
]
if m[1].found()

View File

@ -414,12 +414,7 @@ struct rate {
*/
void *st_rate_start (int inrate, int outrate)
{
struct rate *rate = audio_calloc(__func__, 1, sizeof(*rate));
if (!rate) {
dolog ("Could not allocate resampler (%zu bytes)\n", sizeof (*rate));
return NULL;
}
struct rate *rate = g_new0(struct rate, 1);
rate->opos = 0;
@ -445,6 +440,86 @@ void st_rate_stop (void *opaque)
g_free (opaque);
}
/**
* st_rate_frames_out() - returns the number of frames the resampling code
* generates from frames_in frames
*
* @opaque: pointer to struct rate
* @frames_in: number of frames
*
* When upsampling, there may be more than one correct result. In this case,
* the function returns the maximum number of output frames the resampling
* code can generate.
*/
uint32_t st_rate_frames_out(void *opaque, uint32_t frames_in)
{
struct rate *rate = opaque;
uint64_t opos_end, opos_delta;
uint32_t ipos_end;
uint32_t frames_out;
if (rate->opos_inc == 1ULL << 32) {
return frames_in;
}
/* no output frame without at least one input frame */
if (!frames_in) {
return 0;
}
/* last frame read was at rate->ipos - 1 */
ipos_end = rate->ipos - 1 + frames_in;
opos_end = (uint64_t)ipos_end << 32;
/* last frame written was at rate->opos - rate->opos_inc */
if (opos_end + rate->opos_inc <= rate->opos) {
return 0;
}
opos_delta = opos_end - rate->opos + rate->opos_inc;
frames_out = opos_delta / rate->opos_inc;
return opos_delta % rate->opos_inc ? frames_out : frames_out - 1;
}
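A hedged usage sketch of this helper, with assumed example rates; the exact count depends on the resampler's current phase:
void *r = st_rate_start(44100, 48000);     /* 44.1 kHz -> 48 kHz (assumed) */
uint32_t out = st_rate_frames_out(r, 441); /* at most ~480 output frames */
st_rate_stop(r);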
}
/**
* st_rate_frames_in() - returns the number of frames needed to
* get frames_out frames after resampling
*
* @opaque: pointer to struct rate
* @frames_out: number of frames
*
* When downsampling, there may be more than one correct result. In this
* case, the function returns the maximum number of input frames needed.
*/
uint32_t st_rate_frames_in(void *opaque, uint32_t frames_out)
{
struct rate *rate = opaque;
uint64_t opos_start, opos_end;
uint32_t ipos_start, ipos_end;
if (rate->opos_inc == 1ULL << 32) {
return frames_out;
}
if (frames_out) {
opos_start = rate->opos;
ipos_start = rate->ipos;
} else {
uint64_t offset;
/* add offset = ceil(opos_inc) to opos and ipos to avoid an underflow */
offset = (rate->opos_inc + (1ULL << 32) - 1) & ~((1ULL << 32) - 1);
opos_start = rate->opos + offset;
ipos_start = rate->ipos + (offset >> 32);
}
/* last frame written was at opos_start - rate->opos_inc */
opos_end = opos_start - rate->opos_inc + rate->opos_inc * frames_out;
ipos_end = (opos_end >> 32) + 1;
/* last frame read was at ipos_start - 1 */
return ipos_end + 1 > ipos_start ? ipos_end + 1 - ipos_start : 0;
}
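And the converse direction, again with assumed rates: when downsampling 48000 Hz to 44100 Hz, producing 441 output frames consumes at most roughly 480 input frames.
void *r = st_rate_start(48000, 44100);    /* 48 kHz -> 44.1 kHz (assumed) */
uint32_t in = st_rate_frames_in(r, 441);  /* at most ~480 input frames */
st_rate_stop(r);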
void mixeng_clear (struct st_sample *buf, int len)
{
memset (buf, 0, len * sizeof (struct st_sample));

View File

@ -52,6 +52,8 @@ void st_rate_flow(void *opaque, st_sample *ibuf, st_sample *obuf,
void st_rate_flow_mix(void *opaque, st_sample *ibuf, st_sample *obuf,
size_t *isamp, size_t *osamp);
void st_rate_stop (void *opaque);
uint32_t st_rate_frames_out(void *opaque, uint32_t frames_in);
uint32_t st_rate_frames_in(void *opaque, uint32_t frames_out);
void mixeng_clear (struct st_sample *buf, int len);
void mixeng_volume (struct st_sample *buf, int len, struct mixeng_volume *vol);

audio/pwaudio.c Normal file
View File

@ -0,0 +1,915 @@
/*
* QEMU Pipewire audio driver
*
* Copyright (c) 2023 Red Hat Inc.
*
* Author: Dorinda Bassey <dbassey@redhat.com>
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "qemu/module.h"
#include "audio.h"
#include <errno.h>
#include "qemu/error-report.h"
#include <spa/param/audio/format-utils.h>
#include <spa/utils/ringbuffer.h>
#include <spa/utils/result.h>
#include <spa/param/props.h>
#include <pipewire/pipewire.h>
#include "trace.h"
#define AUDIO_CAP "pipewire"
#define RINGBUFFER_SIZE (1u << 22)
#define RINGBUFFER_MASK (RINGBUFFER_SIZE - 1)
#include "audio_int.h"
typedef struct pwvolume {
uint32_t channels;
float values[SPA_AUDIO_MAX_CHANNELS];
} pwvolume;
typedef struct pwaudio {
Audiodev *dev;
struct pw_thread_loop *thread_loop;
struct pw_context *context;
struct pw_core *core;
struct spa_hook core_listener;
int last_seq, pending_seq, error;
} pwaudio;
typedef struct PWVoice {
pwaudio *g;
struct pw_stream *stream;
struct spa_hook stream_listener;
struct spa_audio_info_raw info;
uint32_t highwater_mark;
uint32_t frame_size, req;
struct spa_ringbuffer ring;
uint8_t buffer[RINGBUFFER_SIZE];
pwvolume volume;
bool muted;
} PWVoice;
typedef struct PWVoiceOut {
HWVoiceOut hw;
PWVoice v;
} PWVoiceOut;
typedef struct PWVoiceIn {
HWVoiceIn hw;
PWVoice v;
} PWVoiceIn;
static void
stream_destroy(void *data)
{
PWVoice *v = (PWVoice *) data;
spa_hook_remove(&v->stream_listener);
v->stream = NULL;
}
/* output data processing function: read playback data from the ring buffer */
static void
playback_on_process(void *data)
{
PWVoice *v = data;
void *p;
struct pw_buffer *b;
struct spa_buffer *buf;
uint32_t req, index, n_bytes;
int32_t avail;
assert(v->stream);
/* obtain a buffer to read from */
b = pw_stream_dequeue_buffer(v->stream);
if (b == NULL) {
error_report("out of buffers: %s", strerror(errno));
return;
}
buf = b->buffer;
p = buf->datas[0].data;
if (p == NULL) {
return;
}
/* calculate the total number of bytes to read from the buffer */
req = b->requested * v->frame_size;
if (req == 0) {
req = v->req;
}
n_bytes = SPA_MIN(req, buf->datas[0].maxsize);
/* get the number of bytes available to read from the buffer */
avail = spa_ringbuffer_get_read_index(&v->ring, &index);
if (avail <= 0) {
PWVoiceOut *vo = container_of(data, PWVoiceOut, v);
audio_pcm_info_clear_buf(&vo->hw.info, p, n_bytes / v->frame_size);
} else {
if ((uint32_t) avail < n_bytes) {
/*
* PipeWire immediately calls this callback again if we provide
* less than n_bytes. Then audio_pcm_info_clear_buf() fills the
* rest of the buffer with silence.
*/
n_bytes = avail;
}
spa_ringbuffer_read_data(&v->ring,
v->buffer, RINGBUFFER_SIZE,
index & RINGBUFFER_MASK, p, n_bytes);
index += n_bytes;
spa_ringbuffer_read_update(&v->ring, index);
}
buf->datas[0].chunk->offset = 0;
buf->datas[0].chunk->stride = v->frame_size;
buf->datas[0].chunk->size = n_bytes;
/* queue the buffer for playback */
pw_stream_queue_buffer(v->stream, b);
}
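The indexing above relies on RINGBUFFER_SIZE being a power of two: the free-running 32-bit index is reduced with a mask rather than a modulo, and unsigned wraparound of the index is harmless. A minimal standalone sketch of the same idiom:
uint8_t ring[RINGBUFFER_SIZE];
uint32_t widx = 0;
void ring_put(uint8_t b)
{
    ring[widx & RINGBUFFER_MASK] = b; /* widx & (2^n - 1) == widx % 2^n */
    widx++;                           /* wraps at UINT32_MAX by design */
}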
/* input data processing function: write captured data into the ring buffer */
static void
capture_on_process(void *data)
{
PWVoice *v = (PWVoice *) data;
void *p;
struct pw_buffer *b;
struct spa_buffer *buf;
int32_t filled;
uint32_t index, offs, n_bytes;
assert(v->stream);
/* obtain a buffer */
b = pw_stream_dequeue_buffer(v->stream);
if (b == NULL) {
error_report("out of buffers: %s", strerror(errno));
return;
}
/* Write data into buffer */
buf = b->buffer;
p = buf->datas[0].data;
if (p == NULL) {
return;
}
offs = SPA_MIN(buf->datas[0].chunk->offset, buf->datas[0].maxsize);
n_bytes = SPA_MIN(buf->datas[0].chunk->size, buf->datas[0].maxsize - offs);
filled = spa_ringbuffer_get_write_index(&v->ring, &index);
if (filled < 0) {
error_report("%p: underrun write:%u filled:%d", p, index, filled);
} else {
if ((uint32_t) filled + n_bytes > RINGBUFFER_SIZE) {
error_report("%p: overrun write:%u filled:%d + size:%u > max:%u",
p, index, filled, n_bytes, RINGBUFFER_SIZE);
}
}
spa_ringbuffer_write_data(&v->ring,
v->buffer, RINGBUFFER_SIZE,
index & RINGBUFFER_MASK,
SPA_PTROFF(p, offs, void), n_bytes);
index += n_bytes;
spa_ringbuffer_write_update(&v->ring, index);
/* hand the buffer back to the stream */
pw_stream_queue_buffer(v->stream, b);
}
static void
on_stream_state_changed(void *data, enum pw_stream_state old,
enum pw_stream_state state, const char *error)
{
PWVoice *v = (PWVoice *) data;
trace_pw_state_changed(pw_stream_get_node_id(v->stream),
pw_stream_state_as_string(state));
switch (state) {
case PW_STREAM_STATE_ERROR:
case PW_STREAM_STATE_UNCONNECTED:
break;
case PW_STREAM_STATE_PAUSED:
case PW_STREAM_STATE_CONNECTING:
case PW_STREAM_STATE_STREAMING:
break;
}
}
static const struct pw_stream_events capture_stream_events = {
PW_VERSION_STREAM_EVENTS,
.destroy = stream_destroy,
.state_changed = on_stream_state_changed,
.process = capture_on_process
};
static const struct pw_stream_events playback_stream_events = {
PW_VERSION_STREAM_EVENTS,
.destroy = stream_destroy,
.state_changed = on_stream_state_changed,
.process = playback_on_process
};
static size_t
qpw_read(HWVoiceIn *hw, void *data, size_t len)
{
PWVoiceIn *pw = (PWVoiceIn *) hw;
PWVoice *v = &pw->v;
pwaudio *c = v->g;
const char *error = NULL;
size_t l;
int32_t avail;
uint32_t index;
pw_thread_loop_lock(c->thread_loop);
if (pw_stream_get_state(v->stream, &error) != PW_STREAM_STATE_STREAMING) {
/* wait for stream to become ready */
l = 0;
goto done_unlock;
}
/* get the number of bytes available to read from the buffer */
avail = spa_ringbuffer_get_read_index(&v->ring, &index);
trace_pw_read(avail, index, len);
if (avail < (int32_t) len) {
len = avail;
}
spa_ringbuffer_read_data(&v->ring,
v->buffer, RINGBUFFER_SIZE,
index & RINGBUFFER_MASK, data, len);
index += len;
spa_ringbuffer_read_update(&v->ring, index);
l = len;
done_unlock:
pw_thread_loop_unlock(c->thread_loop);
return l;
}
static size_t qpw_buffer_get_free(HWVoiceOut *hw)
{
PWVoiceOut *pw = (PWVoiceOut *)hw;
PWVoice *v = &pw->v;
pwaudio *c = v->g;
const char *error = NULL;
int32_t filled, avail;
uint32_t index;
pw_thread_loop_lock(c->thread_loop);
if (pw_stream_get_state(v->stream, &error) != PW_STREAM_STATE_STREAMING) {
/* wait for stream to become ready */
avail = 0;
goto done_unlock;
}
filled = spa_ringbuffer_get_write_index(&v->ring, &index);
avail = v->highwater_mark - filled;
done_unlock:
pw_thread_loop_unlock(c->thread_loop);
return avail;
}
static size_t
qpw_write(HWVoiceOut *hw, void *data, size_t len)
{
PWVoiceOut *pw = (PWVoiceOut *) hw;
PWVoice *v = &pw->v;
pwaudio *c = v->g;
const char *error = NULL;
int32_t filled, avail;
uint32_t index;
pw_thread_loop_lock(c->thread_loop);
if (pw_stream_get_state(v->stream, &error) != PW_STREAM_STATE_STREAMING) {
/* wait for stream to become ready */
len = 0;
goto done_unlock;
}
filled = spa_ringbuffer_get_write_index(&v->ring, &index);
avail = v->highwater_mark - filled;
trace_pw_write(filled, avail, index, len);
if (len > avail) {
len = avail;
}
if (filled < 0) {
error_report("%p: underrun write:%u filled:%d", pw, index, filled);
} else {
if ((uint32_t) filled + len > RINGBUFFER_SIZE) {
error_report("%p: overrun write:%u filled:%d + size:%zu > max:%u",
pw, index, filled, len, RINGBUFFER_SIZE);
}
}
spa_ringbuffer_write_data(&v->ring,
v->buffer, RINGBUFFER_SIZE,
index & RINGBUFFER_MASK, data, len);
index += len;
spa_ringbuffer_write_update(&v->ring, index);
done_unlock:
pw_thread_loop_unlock(c->thread_loop);
return len;
}
static int
audfmt_to_pw(AudioFormat fmt, int endianness)
{
int format;
switch (fmt) {
case AUDIO_FORMAT_S8:
format = SPA_AUDIO_FORMAT_S8;
break;
case AUDIO_FORMAT_U8:
format = SPA_AUDIO_FORMAT_U8;
break;
case AUDIO_FORMAT_S16:
format = endianness ? SPA_AUDIO_FORMAT_S16_BE : SPA_AUDIO_FORMAT_S16_LE;
break;
case AUDIO_FORMAT_U16:
format = endianness ? SPA_AUDIO_FORMAT_U16_BE : SPA_AUDIO_FORMAT_U16_LE;
break;
case AUDIO_FORMAT_S32:
format = endianness ? SPA_AUDIO_FORMAT_S32_BE : SPA_AUDIO_FORMAT_S32_LE;
break;
case AUDIO_FORMAT_U32:
format = endianness ? SPA_AUDIO_FORMAT_U32_BE : SPA_AUDIO_FORMAT_U32_LE;
break;
case AUDIO_FORMAT_F32:
format = endianness ? SPA_AUDIO_FORMAT_F32_BE : SPA_AUDIO_FORMAT_F32_LE;
break;
default:
dolog("Internal logic error: Bad audio format %d\n", fmt);
format = SPA_AUDIO_FORMAT_U8;
break;
}
return format;
}
static AudioFormat
pw_to_audfmt(enum spa_audio_format fmt, int *endianness,
uint32_t *sample_size)
{
switch (fmt) {
case SPA_AUDIO_FORMAT_S8:
*sample_size = 1;
return AUDIO_FORMAT_S8;
case SPA_AUDIO_FORMAT_U8:
*sample_size = 1;
return AUDIO_FORMAT_U8;
case SPA_AUDIO_FORMAT_S16_BE:
*sample_size = 2;
*endianness = 1;
return AUDIO_FORMAT_S16;
case SPA_AUDIO_FORMAT_S16_LE:
*sample_size = 2;
*endianness = 0;
return AUDIO_FORMAT_S16;
case SPA_AUDIO_FORMAT_U16_BE:
*sample_size = 2;
*endianness = 1;
return AUDIO_FORMAT_U16;
case SPA_AUDIO_FORMAT_U16_LE:
*sample_size = 2;
*endianness = 0;
return AUDIO_FORMAT_U16;
case SPA_AUDIO_FORMAT_S32_BE:
*sample_size = 4;
*endianness = 1;
return AUDIO_FORMAT_S32;
case SPA_AUDIO_FORMAT_S32_LE:
*sample_size = 4;
*endianness = 0;
return AUDIO_FORMAT_S32;
case SPA_AUDIO_FORMAT_U32_BE:
*sample_size = 4;
*endianness = 1;
return AUDIO_FORMAT_U32;
case SPA_AUDIO_FORMAT_U32_LE:
*sample_size = 4;
*endianness = 0;
return AUDIO_FORMAT_U32;
case SPA_AUDIO_FORMAT_F32_BE:
*sample_size = 4;
*endianness = 1;
return AUDIO_FORMAT_F32;
case SPA_AUDIO_FORMAT_F32_LE:
*sample_size = 4;
*endianness = 0;
return AUDIO_FORMAT_F32;
default:
*sample_size = 1;
dolog("Internal logic error: Bad spa_audio_format %d\n", fmt);
return AUDIO_FORMAT_U8;
}
}
static int
create_stream(pwaudio *c, PWVoice *v, const char *stream_name,
const char *name, enum spa_direction dir)
{
int res;
uint32_t n_params;
const struct spa_pod *params[2];
uint8_t buffer[1024];
struct spa_pod_builder b;
uint64_t buf_samples;
struct pw_properties *props;
props = pw_properties_new(NULL, NULL);
/* 75% of the timer period for faster updates */
buf_samples = (uint64_t)v->g->dev->timer_period * v->info.rate
* 3 / 4 / 1000000;
pw_properties_setf(props, PW_KEY_NODE_LATENCY, "%" PRIu64 "/%u",
buf_samples, v->info.rate);
trace_pw_period(buf_samples, v->info.rate);
if (name) {
pw_properties_set(props, PW_KEY_TARGET_OBJECT, name);
}
v->stream = pw_stream_new(c->core, stream_name, props);
if (v->stream == NULL) {
return -1;
}
if (dir == SPA_DIRECTION_INPUT) {
pw_stream_add_listener(v->stream,
&v->stream_listener, &capture_stream_events, v);
} else {
pw_stream_add_listener(v->stream,
&v->stream_listener, &playback_stream_events, v);
}
n_params = 0;
spa_pod_builder_init(&b, buffer, sizeof(buffer));
params[n_params++] = spa_format_audio_raw_build(&b,
SPA_PARAM_EnumFormat,
&v->info);
/* connect the stream to a sink or source */
res = pw_stream_connect(v->stream,
dir ==
SPA_DIRECTION_INPUT ? PW_DIRECTION_INPUT :
PW_DIRECTION_OUTPUT, PW_ID_ANY,
PW_STREAM_FLAG_AUTOCONNECT |
PW_STREAM_FLAG_INACTIVE |
PW_STREAM_FLAG_MAP_BUFFERS |
PW_STREAM_FLAG_RT_PROCESS, params, n_params);
if (res < 0) {
pw_stream_destroy(v->stream);
return -1;
}
return 0;
}
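For a sense of scale, the latency computation above with assumed values (QEMU's default timer-period of 10000 microseconds and a 48000 Hz stream):
uint64_t timer_period = 10000;                                /* us, assumed */
uint32_t rate = 48000;                                        /* Hz, assumed */
uint64_t buf_samples = timer_period * rate * 3 / 4 / 1000000; /* 360 */
/* PW_KEY_NODE_LATENCY becomes "360/48000", i.e. 7.5 ms */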
static int
qpw_stream_new(pwaudio *c, PWVoice *v, const char *stream_name,
const char *name, enum spa_direction dir)
{
int r;
switch (v->info.channels) {
case 8:
v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
v->info.position[2] = SPA_AUDIO_CHANNEL_FC;
v->info.position[3] = SPA_AUDIO_CHANNEL_LFE;
v->info.position[4] = SPA_AUDIO_CHANNEL_RL;
v->info.position[5] = SPA_AUDIO_CHANNEL_RR;
v->info.position[6] = SPA_AUDIO_CHANNEL_SL;
v->info.position[7] = SPA_AUDIO_CHANNEL_SR;
break;
case 6:
v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
v->info.position[2] = SPA_AUDIO_CHANNEL_FC;
v->info.position[3] = SPA_AUDIO_CHANNEL_LFE;
v->info.position[4] = SPA_AUDIO_CHANNEL_RL;
v->info.position[5] = SPA_AUDIO_CHANNEL_RR;
break;
case 5:
v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
v->info.position[2] = SPA_AUDIO_CHANNEL_FC;
v->info.position[3] = SPA_AUDIO_CHANNEL_LFE;
v->info.position[4] = SPA_AUDIO_CHANNEL_RC;
break;
case 4:
v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
v->info.position[2] = SPA_AUDIO_CHANNEL_FC;
v->info.position[3] = SPA_AUDIO_CHANNEL_RC;
break;
case 3:
v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
v->info.position[2] = SPA_AUDIO_CHANNEL_LFE;
break;
case 2:
v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
break;
case 1:
v->info.position[0] = SPA_AUDIO_CHANNEL_MONO;
break;
default:
for (size_t i = 0; i < v->info.channels; i++) {
v->info.position[i] = SPA_AUDIO_CHANNEL_UNKNOWN;
}
break;
}
/* create a new unconnected pwstream */
r = create_stream(c, v, stream_name, name, dir);
if (r < 0) {
AUD_log(AUDIO_CAP, "Failed to create stream.");
return -1;
}
return r;
}
static int
qpw_init_out(HWVoiceOut *hw, struct audsettings *as, void *drv_opaque)
{
PWVoiceOut *pw = (PWVoiceOut *) hw;
PWVoice *v = &pw->v;
struct audsettings obt_as = *as;
pwaudio *c = v->g = drv_opaque;
AudiodevPipewireOptions *popts = &c->dev->u.pipewire;
AudiodevPipewirePerDirectionOptions *ppdo = popts->out;
int r;
pw_thread_loop_lock(c->thread_loop);
v->info.format = audfmt_to_pw(as->fmt, as->endianness);
v->info.channels = as->nchannels;
v->info.rate = as->freq;
obt_as.fmt =
pw_to_audfmt(v->info.format, &obt_as.endianness, &v->frame_size);
v->frame_size *= as->nchannels;
v->req = (uint64_t)c->dev->timer_period * v->info.rate
* 1 / 2 / 1000000 * v->frame_size;
/* call the function that creates a new stream for playback */
r = qpw_stream_new(c, v, ppdo->stream_name ? : c->dev->id,
ppdo->name, SPA_DIRECTION_OUTPUT);
if (r < 0) {
error_report("qpw_stream_new for playback failed");
pw_thread_loop_unlock(c->thread_loop);
return -1;
}
/* report the audio format we support */
audio_pcm_init_info(&hw->info, &obt_as);
/* report the buffer size to qemu */
hw->samples = audio_buffer_frames(
qapi_AudiodevPipewirePerDirectionOptions_base(ppdo), &obt_as, 46440);
v->highwater_mark = MIN(RINGBUFFER_SIZE,
(ppdo->has_latency ? ppdo->latency : 46440)
* (uint64_t)v->info.rate / 1000000 * v->frame_size);
pw_thread_loop_unlock(c->thread_loop);
return 0;
}
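Plugging assumed values into the two formulas above (10000 us timer period, 48000 Hz, 16-bit stereo so frame_size = 4, and the 46440 us default latency):
uint32_t req = 10000ULL * 48000 * 1 / 2 / 1000000 * 4;    /* 960 bytes */
uint32_t highwater = MIN(RINGBUFFER_SIZE,
                         46440ULL * 48000 / 1000000 * 4); /* 8916 bytes */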
static int
qpw_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
{
PWVoiceIn *pw = (PWVoiceIn *) hw;
PWVoice *v = &pw->v;
struct audsettings obt_as = *as;
pwaudio *c = v->g = drv_opaque;
AudiodevPipewireOptions *popts = &c->dev->u.pipewire;
AudiodevPipewirePerDirectionOptions *ppdo = popts->in;
int r;
pw_thread_loop_lock(c->thread_loop);
v->info.format = audfmt_to_pw(as->fmt, as->endianness);
v->info.channels = as->nchannels;
v->info.rate = as->freq;
obt_as.fmt =
pw_to_audfmt(v->info.format, &obt_as.endianness, &v->frame_size);
v->frame_size *= as->nchannels;
/* call the function that creates a new stream for recording */
r = qpw_stream_new(c, v, ppdo->stream_name ? : c->dev->id,
ppdo->name, SPA_DIRECTION_INPUT);
if (r < 0) {
error_report("qpw_stream_new for recording failed");
pw_thread_loop_unlock(c->thread_loop);
return -1;
}
/* report the audio format we support */
audio_pcm_init_info(&hw->info, &obt_as);
/* report the buffer size to qemu */
hw->samples = audio_buffer_frames(
qapi_AudiodevPipewirePerDirectionOptions_base(ppdo), &obt_as, 46440);
pw_thread_loop_unlock(c->thread_loop);
return 0;
}
static void
qpw_fini_out(HWVoiceOut *hw)
{
PWVoiceOut *pw = (PWVoiceOut *) hw;
PWVoice *v = &pw->v;
if (v->stream) {
pwaudio *c = v->g;
pw_thread_loop_lock(c->thread_loop);
pw_stream_destroy(v->stream);
v->stream = NULL;
pw_thread_loop_unlock(c->thread_loop);
}
}
static void
qpw_fini_in(HWVoiceIn *hw)
{
PWVoiceIn *pw = (PWVoiceIn *) hw;
PWVoice *v = &pw->v;
if (v->stream) {
pwaudio *c = v->g;
pw_thread_loop_lock(c->thread_loop);
pw_stream_destroy(v->stream);
v->stream = NULL;
pw_thread_loop_unlock(c->thread_loop);
}
}
static void
qpw_enable_out(HWVoiceOut *hw, bool enable)
{
PWVoiceOut *po = (PWVoiceOut *) hw;
PWVoice *v = &po->v;
pwaudio *c = v->g;
pw_thread_loop_lock(c->thread_loop);
pw_stream_set_active(v->stream, enable);
pw_thread_loop_unlock(c->thread_loop);
}
static void
qpw_enable_in(HWVoiceIn *hw, bool enable)
{
PWVoiceIn *pi = (PWVoiceIn *) hw;
PWVoice *v = &pi->v;
pwaudio *c = v->g;
pw_thread_loop_lock(c->thread_loop);
pw_stream_set_active(v->stream, enable);
pw_thread_loop_unlock(c->thread_loop);
}
static void
qpw_volume_out(HWVoiceOut *hw, Volume *vol)
{
PWVoiceOut *pw = (PWVoiceOut *) hw;
PWVoice *v = &pw->v;
pwaudio *c = v->g;
int i, ret;
pw_thread_loop_lock(c->thread_loop);
v->volume.channels = vol->channels;
for (i = 0; i < vol->channels; ++i) {
v->volume.values[i] = (float)vol->vol[i] / 255;
}
ret = pw_stream_set_control(v->stream,
SPA_PROP_channelVolumes, v->volume.channels, v->volume.values, 0);
trace_pw_vol(ret == 0 ? "success" : "failed");
v->muted = vol->mute;
float val = v->muted ? 1.f : 0.f;
ret = pw_stream_set_control(v->stream, SPA_PROP_mute, 1, &val, 0);
pw_thread_loop_unlock(c->thread_loop);
}
static void
qpw_volume_in(HWVoiceIn *hw, Volume *vol)
{
PWVoiceIn *pw = (PWVoiceIn *) hw;
PWVoice *v = &pw->v;
pwaudio *c = v->g;
int i, ret;
pw_thread_loop_lock(c->thread_loop);
v->volume.channels = vol->channels;
for (i = 0; i < vol->channels; ++i) {
v->volume.values[i] = (float)vol->vol[i] / 255;
}
ret = pw_stream_set_control(v->stream,
SPA_PROP_channelVolumes, v->volume.channels, v->volume.values, 0);
trace_pw_vol(ret == 0 ? "success" : "failed");
v->muted = vol->mute;
float val = v->muted ? 1.f : 0.f;
ret = pw_stream_set_control(v->stream, SPA_PROP_mute, 1, &val, 0);
pw_thread_loop_unlock(c->thread_loop);
}
static int wait_resync(pwaudio *pw)
{
int res;
pw->pending_seq = pw_core_sync(pw->core, PW_ID_CORE, pw->pending_seq);
while (true) {
pw_thread_loop_wait(pw->thread_loop);
res = pw->error;
if (res < 0) {
pw->error = 0;
return res;
}
if (pw->pending_seq == pw->last_seq) {
break;
}
}
return 0;
}
static void
on_core_error(void *data, uint32_t id, int seq, int res, const char *message)
{
pwaudio *pw = data;
error_report("error id:%u seq:%d res:%d (%s): %s",
id, seq, res, spa_strerror(res), message);
/* wake up the thread waiting in pw_thread_loop_wait() */
pw_thread_loop_signal(pw->thread_loop, FALSE);
}
static void
on_core_done(void *data, uint32_t id, int seq)
{
pwaudio *pw = data;
assert(id == PW_ID_CORE);
pw->last_seq = seq;
if (pw->pending_seq == seq) {
/* wake up the thread waiting in pw_thread_loop_wait() */
pw_thread_loop_signal(pw->thread_loop, FALSE);
}
}
static const struct pw_core_events core_events = {
PW_VERSION_CORE_EVENTS,
.done = on_core_done,
.error = on_core_error,
};
static void *
qpw_audio_init(Audiodev *dev)
{
g_autofree pwaudio *pw = g_new0(pwaudio, 1);
pw_init(NULL, NULL);
trace_pw_audio_init();
assert(dev->driver == AUDIODEV_DRIVER_PIPEWIRE);
pw->dev = dev;
pw->thread_loop = pw_thread_loop_new("Pipewire thread loop", NULL);
if (pw->thread_loop == NULL) {
error_report("Could not create Pipewire loop");
goto fail;
}
pw->context =
pw_context_new(pw_thread_loop_get_loop(pw->thread_loop), NULL, 0);
if (pw->context == NULL) {
error_report("Could not create Pipewire context");
goto fail;
}
if (pw_thread_loop_start(pw->thread_loop) < 0) {
error_report("Could not start Pipewire loop");
goto fail;
}
pw_thread_loop_lock(pw->thread_loop);
pw->core = pw_context_connect(pw->context, NULL, 0);
if (pw->core == NULL) {
pw_thread_loop_unlock(pw->thread_loop);
goto fail;
}
if (pw_core_add_listener(pw->core, &pw->core_listener,
&core_events, pw) < 0) {
pw_thread_loop_unlock(pw->thread_loop);
goto fail;
}
if (wait_resync(pw) < 0) {
pw_thread_loop_unlock(pw->thread_loop);
}
pw_thread_loop_unlock(pw->thread_loop);
return g_steal_pointer(&pw);
fail:
AUD_log(AUDIO_CAP, "Failed to initialize PW context");
if (pw->thread_loop) {
pw_thread_loop_stop(pw->thread_loop);
}
if (pw->context) {
g_clear_pointer(&pw->context, pw_context_destroy);
}
if (pw->thread_loop) {
g_clear_pointer(&pw->thread_loop, pw_thread_loop_destroy);
}
return NULL;
}
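The init function uses the GLib auto-cleanup idiom: g_autofree frees pw on every early return, and g_steal_pointer transfers ownership out on success. A standalone sketch of the pattern (setup_ok is a hypothetical placeholder):
#include <glib.h>

static gpointer make_ctx(gboolean setup_ok)
{
    g_autofree char *ctx = g_malloc0(64);
    if (!setup_ok) {
        return NULL;              /* ctx is freed automatically here */
    }
    return g_steal_pointer(&ctx); /* ownership moves to caller; no free */
}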
static void
qpw_audio_fini(void *opaque)
{
pwaudio *pw = opaque;
if (pw->thread_loop) {
pw_thread_loop_stop(pw->thread_loop);
}
if (pw->core) {
spa_hook_remove(&pw->core_listener);
spa_zero(pw->core_listener);
pw_core_disconnect(pw->core);
}
if (pw->context) {
pw_context_destroy(pw->context);
}
pw_thread_loop_destroy(pw->thread_loop);
g_free(pw);
}
static struct audio_pcm_ops qpw_pcm_ops = {
.init_out = qpw_init_out,
.fini_out = qpw_fini_out,
.write = qpw_write,
.buffer_get_free = qpw_buffer_get_free,
.run_buffer_out = audio_generic_run_buffer_out,
.enable_out = qpw_enable_out,
.volume_out = qpw_volume_out,
.volume_in = qpw_volume_in,
.init_in = qpw_init_in,
.fini_in = qpw_fini_in,
.read = qpw_read,
.run_buffer_in = audio_generic_run_buffer_in,
.enable_in = qpw_enable_in
};
static struct audio_driver pw_audio_driver = {
.name = "pipewire",
.descr = "http://www.pipewire.org/",
.init = qpw_audio_init,
.fini = qpw_audio_fini,
.pcm_ops = &qpw_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof(PWVoiceOut),
.voice_size_in = sizeof(PWVoiceIn),
};
static void
register_audio_pw(void)
{
audio_driver_register(&pw_audio_driver);
}
type_init(register_audio_pw);

View File

@ -40,8 +40,6 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf,
int64_t t;
#endif
ilast = rate->ilast;
istart = ibuf;
iend = ibuf + *isamp;
@ -59,15 +57,17 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf,
return;
}
while (obuf < oend) {
/* Safety catch to make sure we have input samples. */
/* without input samples, there's nothing to do */
if (ibuf >= iend) {
break;
*osamp = 0;
return;
}
/* read as many input samples so that ipos > opos */
ilast = rate->ilast;
while (true) {
/* read as many input samples so that ipos > opos */
while (rate->ipos <= (rate->opos >> 32)) {
ilast = *ibuf++;
rate->ipos++;
@ -78,6 +78,11 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf,
}
}
/* make sure that the next output sample can be written */
if (obuf >= oend) {
break;
}
icur = *ibuf;
/* wrap ipos and opos around long before they overflow */

View File

@ -18,6 +18,14 @@ dbus_audio_register(const char *s, const char *dir) "sender = %s, dir = %s"
dbus_audio_put_buffer_out(size_t len) "len = %zu"
dbus_audio_read(size_t len) "len = %zu"
# pwaudio.c
pw_state_changed(int nodeid, const char *s) "node id: %d stream state: %s"
pw_read(int32_t avail, uint32_t index, size_t len) "avail=%d index=%u len=%zu"
pw_write(int32_t filled, int32_t avail, uint32_t index, size_t len) "filled=%d avail=%d index=%u len=%zu"
pw_vol(const char *ret) "set volume: %s"
pw_period(uint64_t quantum, uint32_t rate) "period =%" PRIu64 "/%u"
pw_audio_init(void) "Initialize Pipewire context"
# audio.c
audio_timer_start(int interval) "interval %d ms"
audio_timer_stop(void) ""

View File

@ -59,6 +59,19 @@ struct CryptoDevBackendBuiltin {
CryptoDevBackendBuiltinSession *sessions[MAX_NUM_SESSIONS];
};
static void cryptodev_builtin_init_akcipher(CryptoDevBackend *backend)
{
QCryptoAkCipherOptions opts;
opts.alg = QCRYPTO_AKCIPHER_ALG_RSA;
opts.u.rsa.padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW;
if (qcrypto_akcipher_supports(&opts)) {
backend->conf.crypto_services |=
(1u << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER);
backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA;
}
}
static void cryptodev_builtin_init(
CryptoDevBackend *backend, Error **errp)
{
@ -72,21 +85,18 @@ static void cryptodev_builtin_init(
return;
}
cc = cryptodev_backend_new_client(
"cryptodev-builtin", NULL);
cc = cryptodev_backend_new_client();
cc->info_str = g_strdup_printf("cryptodev-builtin0");
cc->queue_index = 0;
cc->type = CRYPTODEV_BACKEND_TYPE_BUILTIN;
cc->type = QCRYPTODEV_BACKEND_TYPE_BUILTIN;
backend->conf.peers.ccs[0] = cc;
backend->conf.crypto_services =
1u << VIRTIO_CRYPTO_SERVICE_CIPHER |
1u << VIRTIO_CRYPTO_SERVICE_HASH |
1u << VIRTIO_CRYPTO_SERVICE_MAC |
1u << VIRTIO_CRYPTO_SERVICE_AKCIPHER;
1u << QCRYPTODEV_BACKEND_SERVICE_CIPHER |
1u << QCRYPTODEV_BACKEND_SERVICE_HASH |
1u << QCRYPTODEV_BACKEND_SERVICE_MAC;
backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC;
backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1;
backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA;
/*
* Set the Maximum length of crypto request.
* Why this value? Just avoid to overflow when
@ -95,6 +105,7 @@ static void cryptodev_builtin_init(
backend->conf.max_size = LONG_MAX - sizeof(CryptoDevBackendOpInfo);
backend->conf.max_cipher_key_len = CRYPTODEV_BUITLIN_MAX_CIPHER_KEY_LEN;
backend->conf.max_auth_key_len = CRYPTODEV_BUITLIN_MAX_AUTH_KEY_LEN;
cryptodev_builtin_init_akcipher(backend);
cryptodev_backend_set_ready(backend, true);
}
@ -528,17 +539,14 @@ static int cryptodev_builtin_asym_operation(
static int cryptodev_builtin_operation(
CryptoDevBackend *backend,
CryptoDevBackendOpInfo *op_info,
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
CryptoDevBackendOpInfo *op_info)
{
CryptoDevBackendBuiltin *builtin =
CRYPTODEV_BACKEND_BUILTIN(backend);
CryptoDevBackendBuiltinSession *sess;
CryptoDevBackendSymOpInfo *sym_op_info;
CryptoDevBackendAsymOpInfo *asym_op_info;
enum CryptoDevBackendAlgType algtype = op_info->algtype;
QCryptodevBackendAlgType algtype = op_info->algtype;
int status = -VIRTIO_CRYPTO_ERR;
Error *local_error = NULL;
@ -550,11 +558,11 @@ static int cryptodev_builtin_operation(
}
sess = builtin->sessions[op_info->session_id];
if (algtype == CRYPTODEV_BACKEND_ALG_SYM) {
if (algtype == QCRYPTODEV_BACKEND_ALG_SYM) {
sym_op_info = op_info->u.sym_op_info;
status = cryptodev_builtin_sym_operation(sess, sym_op_info,
&local_error);
} else if (algtype == CRYPTODEV_BACKEND_ALG_ASYM) {
} else if (algtype == QCRYPTODEV_BACKEND_ALG_ASYM) {
asym_op_info = op_info->u.asym_op_info;
status = cryptodev_builtin_asym_operation(sess, op_info->op_code,
asym_op_info, &local_error);
@ -563,8 +571,8 @@ static int cryptodev_builtin_operation(
if (local_error) {
error_report_err(local_error);
}
if (cb) {
cb(opaque, status);
if (op_info->cb) {
op_info->cb(op_info->opaque, status);
}
return 0;
}

View File

@ -0,0 +1,54 @@
/*
* HMP commands related to cryptodev
*
* Copyright (c) 2023 Bytedance.Inc
*
* Authors:
zhenwei pi <pizhenwei@bytedance.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or
* (at your option) any later version.
*/
#include "qemu/osdep.h"
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "qapi/qapi-commands-cryptodev.h"
#include "qapi/qmp/qdict.h"
void hmp_info_cryptodev(Monitor *mon, const QDict *qdict)
{
QCryptodevInfoList *il;
QCryptodevBackendServiceTypeList *sl;
QCryptodevBackendClientList *cl;
for (il = qmp_query_cryptodev(NULL); il; il = il->next) {
g_autofree char *services = NULL;
QCryptodevInfo *info = il->value;
char *tmp_services;
/* build a string like 'service=[akcipher|mac|hash|cipher]' */
for (sl = info->service; sl; sl = sl->next) {
const char *service = QCryptodevBackendServiceType_str(sl->value);
if (!services) {
services = g_strdup(service);
} else {
tmp_services = g_strjoin("|", services, service, NULL);
g_free(services);
services = tmp_services;
}
}
monitor_printf(mon, "%s: service=[%s]\n", info->id, services);
for (cl = info->client; cl; cl = cl->next) {
QCryptodevBackendClient *client = cl->value;
monitor_printf(mon, " queue %" PRIu32 ": type=%s\n",
client->queue,
QCryptodevBackendType_str(client->type));
}
}
qapi_free_QCryptodevInfoList(il);
}
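Given the format strings above, the monitor output looks roughly like the following hypothetical session (the id and service list depend on the configured backend):
(qemu) info cryptodev
cryptodev0: service=[akcipher|mac|hash|cipher]
 queue 0: type=builtin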

View File

@ -223,14 +223,14 @@ static void cryptodev_lkcf_init(CryptoDevBackend *backend, Error **errp)
return;
}
cc = cryptodev_backend_new_client("cryptodev-lkcf", NULL);
cc = cryptodev_backend_new_client();
cc->info_str = g_strdup_printf("cryptodev-lkcf0");
cc->queue_index = 0;
cc->type = CRYPTODEV_BACKEND_TYPE_LKCF;
cc->type = QCRYPTODEV_BACKEND_TYPE_LKCF;
backend->conf.peers.ccs[0] = cc;
backend->conf.crypto_services =
1u << VIRTIO_CRYPTO_SERVICE_AKCIPHER;
1u << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER;
backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA;
lkcf->running = true;
@ -469,15 +469,12 @@ static void *cryptodev_lkcf_worker(void *arg)
static int cryptodev_lkcf_operation(
CryptoDevBackend *backend,
CryptoDevBackendOpInfo *op_info,
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
CryptoDevBackendOpInfo *op_info)
{
CryptoDevBackendLKCF *lkcf =
CRYPTODEV_BACKEND_LKCF(backend);
CryptoDevBackendLKCFSession *sess;
enum CryptoDevBackendAlgType algtype = op_info->algtype;
QCryptodevBackendAlgType algtype = op_info->algtype;
CryptoDevLKCFTask *task;
if (op_info->session_id >= MAX_SESSIONS ||
@ -488,15 +485,15 @@ static int cryptodev_lkcf_operation(
}
sess = lkcf->sess[op_info->session_id];
if (algtype != CRYPTODEV_BACKEND_ALG_ASYM) {
if (algtype != QCRYPTODEV_BACKEND_ALG_ASYM) {
error_report("algtype not supported: %u", algtype);
return -VIRTIO_CRYPTO_NOTSUPP;
}
task = g_new0(CryptoDevLKCFTask, 1);
task->op_info = op_info;
task->cb = cb;
task->opaque = opaque;
task->cb = op_info->cb;
task->opaque = op_info->opaque;
task->sess = sess;
task->lkcf = lkcf;
task->status = -VIRTIO_CRYPTO_ERR;

View File

@ -67,7 +67,7 @@ cryptodev_vhost_user_get_vhost(
{
CryptoDevBackendVhostUser *s =
CRYPTODEV_BACKEND_VHOST_USER(b);
assert(cc->type == CRYPTODEV_BACKEND_TYPE_VHOST_USER);
assert(cc->type == QCRYPTODEV_BACKEND_TYPE_VHOST_USER);
assert(queue < MAX_CRYPTO_QUEUE_NUM);
return s->vhost_crypto[queue];
@ -198,12 +198,11 @@ static void cryptodev_vhost_user_init(
s->opened = true;
for (i = 0; i < queues; i++) {
cc = cryptodev_backend_new_client(
"cryptodev-vhost-user", NULL);
cc = cryptodev_backend_new_client();
cc->info_str = g_strdup_printf("cryptodev-vhost-user%zu to %s ",
i, chr->label);
cc->queue_index = i;
cc->type = CRYPTODEV_BACKEND_TYPE_VHOST_USER;
cc->type = QCRYPTODEV_BACKEND_TYPE_VHOST_USER;
backend->conf.peers.ccs[i] = cc;
@ -222,9 +221,9 @@ static void cryptodev_vhost_user_init(
cryptodev_vhost_user_event, NULL, s, NULL, true);
backend->conf.crypto_services =
1u << VIRTIO_CRYPTO_SERVICE_CIPHER |
1u << VIRTIO_CRYPTO_SERVICE_HASH |
1u << VIRTIO_CRYPTO_SERVICE_MAC;
1u << QCRYPTODEV_BACKEND_SERVICE_CIPHER |
1u << QCRYPTODEV_BACKEND_SERVICE_HASH |
1u << QCRYPTODEV_BACKEND_SERVICE_MAC;
backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC;
backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1;

View File

@ -127,7 +127,7 @@ cryptodev_get_vhost(CryptoDevBackendClient *cc,
switch (cc->type) {
#if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
case CRYPTODEV_BACKEND_TYPE_VHOST_USER:
case QCRYPTODEV_BACKEND_TYPE_VHOST_USER:
vhost_crypto = cryptodev_vhost_user_get_vhost(cc, b, queue);
break;
#endif
@ -195,7 +195,7 @@ int cryptodev_vhost_start(VirtIODevice *dev, int total_queues)
* because vhost user doesn't interrupt masking/unmasking
* properly.
*/
if (cc->type == CRYPTODEV_BACKEND_TYPE_VHOST_USER) {
if (cc->type == QCRYPTODEV_BACKEND_TYPE_VHOST_USER) {
dev->use_guest_notifier_mask = false;
}
}

View File

@ -23,29 +23,92 @@
#include "qemu/osdep.h"
#include "sysemu/cryptodev.h"
#include "sysemu/stats.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-cryptodev.h"
#include "qapi/qapi-types-stats.h"
#include "qapi/visitor.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qom/object_interfaces.h"
#include "hw/virtio/virtio-crypto.h"
#define SYM_ENCRYPT_OPS_STR "sym-encrypt-ops"
#define SYM_DECRYPT_OPS_STR "sym-decrypt-ops"
#define SYM_ENCRYPT_BYTES_STR "sym-encrypt-bytes"
#define SYM_DECRYPT_BYTES_STR "sym-decrypt-bytes"
#define ASYM_ENCRYPT_OPS_STR "asym-encrypt-ops"
#define ASYM_DECRYPT_OPS_STR "asym-decrypt-ops"
#define ASYM_SIGN_OPS_STR "asym-sign-ops"
#define ASYM_VERIFY_OPS_STR "asym-verify-ops"
#define ASYM_ENCRYPT_BYTES_STR "asym-encrypt-bytes"
#define ASYM_DECRYPT_BYTES_STR "asym-decrypt-bytes"
#define ASYM_SIGN_BYTES_STR "asym-sign-bytes"
#define ASYM_VERIFY_BYTES_STR "asym-verify-bytes"
typedef struct StatsArgs {
union StatsResultsType {
StatsResultList **stats;
StatsSchemaList **schema;
} result;
strList *names;
Error **errp;
} StatsArgs;
static QTAILQ_HEAD(, CryptoDevBackendClient) crypto_clients;
static int qmp_query_cryptodev_foreach(Object *obj, void *data)
{
CryptoDevBackend *backend;
QCryptodevInfoList **infolist = data;
uint32_t services, i;
CryptoDevBackendClient *
cryptodev_backend_new_client(const char *model,
const char *name)
if (!object_dynamic_cast(obj, TYPE_CRYPTODEV_BACKEND)) {
return 0;
}
QCryptodevInfo *info = g_new0(QCryptodevInfo, 1);
info->id = g_strdup(object_get_canonical_path_component(obj));
backend = CRYPTODEV_BACKEND(obj);
services = backend->conf.crypto_services;
for (i = 0; i < QCRYPTODEV_BACKEND_SERVICE__MAX; i++) {
if (services & (1 << i)) {
QAPI_LIST_PREPEND(info->service, i);
}
}
for (i = 0; i < backend->conf.peers.queues; i++) {
CryptoDevBackendClient *cc = backend->conf.peers.ccs[i];
QCryptodevBackendClient *client = g_new0(QCryptodevBackendClient, 1);
client->queue = cc->queue_index;
client->type = cc->type;
QAPI_LIST_PREPEND(info->client, client);
}
QAPI_LIST_PREPEND(*infolist, info);
return 0;
}
QCryptodevInfoList *qmp_query_cryptodev(Error **errp)
{
QCryptodevInfoList *list = NULL;
Object *objs = container_get(object_get_root(), "/objects");
object_child_foreach(objs, qmp_query_cryptodev_foreach, &list);
return list;
}
CryptoDevBackendClient *cryptodev_backend_new_client(void)
{
CryptoDevBackendClient *cc;
cc = g_new0(CryptoDevBackendClient, 1);
cc->model = g_strdup(model);
if (name) {
cc->name = g_strdup(name);
}
QTAILQ_INSERT_TAIL(&crypto_clients, cc, next);
return cc;
@ -55,8 +118,6 @@ void cryptodev_backend_free_client(
CryptoDevBackendClient *cc)
{
QTAILQ_REMOVE(&crypto_clients, cc, next);
g_free(cc->name);
g_free(cc->model);
g_free(cc->info_str);
g_free(cc);
}
@ -71,6 +132,9 @@ void cryptodev_backend_cleanup(
if (bc->cleanup) {
bc->cleanup(backend, errp);
}
g_free(backend->sym_stat);
g_free(backend->asym_stat);
}
int cryptodev_backend_create_session(
@ -107,38 +171,111 @@ int cryptodev_backend_close_session(
static int cryptodev_backend_operation(
CryptoDevBackend *backend,
CryptoDevBackendOpInfo *op_info,
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
CryptoDevBackendOpInfo *op_info)
{
CryptoDevBackendClass *bc =
CRYPTODEV_BACKEND_GET_CLASS(backend);
if (bc->do_op) {
return bc->do_op(backend, op_info, queue_index, cb, opaque);
return bc->do_op(backend, op_info);
}
return -VIRTIO_CRYPTO_NOTSUPP;
}
int cryptodev_backend_crypto_operation(
CryptoDevBackend *backend,
void *opaque1,
uint32_t queue_index,
CryptoDevCompletionFunc cb, void *opaque2)
static int cryptodev_backend_account(CryptoDevBackend *backend,
CryptoDevBackendOpInfo *op_info)
{
VirtIOCryptoReq *req = opaque1;
CryptoDevBackendOpInfo *op_info = &req->op_info;
enum CryptoDevBackendAlgType algtype = req->flags;
enum QCryptodevBackendAlgType algtype = op_info->algtype;
int len;
if ((algtype != CRYPTODEV_BACKEND_ALG_SYM)
&& (algtype != CRYPTODEV_BACKEND_ALG_ASYM)) {
if (algtype == QCRYPTODEV_BACKEND_ALG_ASYM) {
CryptoDevBackendAsymOpInfo *asym_op_info = op_info->u.asym_op_info;
len = asym_op_info->src_len;
switch (op_info->op_code) {
case VIRTIO_CRYPTO_AKCIPHER_ENCRYPT:
CryptodevAsymStatIncEncrypt(backend, len);
break;
case VIRTIO_CRYPTO_AKCIPHER_DECRYPT:
CryptodevAsymStatIncDecrypt(backend, len);
break;
case VIRTIO_CRYPTO_AKCIPHER_SIGN:
CryptodevAsymStatIncSign(backend, len);
break;
case VIRTIO_CRYPTO_AKCIPHER_VERIFY:
CryptodevAsymStatIncVerify(backend, len);
break;
default:
return -VIRTIO_CRYPTO_NOTSUPP;
}
} else if (algtype == QCRYPTODEV_BACKEND_ALG_SYM) {
CryptoDevBackendSymOpInfo *sym_op_info = op_info->u.sym_op_info;
len = sym_op_info->src_len;
switch (op_info->op_code) {
case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
CryptodevSymStatIncEncrypt(backend, len);
break;
case VIRTIO_CRYPTO_CIPHER_DECRYPT:
CryptodevSymStatIncDecrypt(backend, len);
break;
default:
return -VIRTIO_CRYPTO_NOTSUPP;
}
} else {
error_report("Unsupported cryptodev alg type: %" PRIu32 "", algtype);
return -VIRTIO_CRYPTO_NOTSUPP;
}
return cryptodev_backend_operation(backend, op_info, queue_index,
cb, opaque2);
return len;
}
static void cryptodev_backend_throttle_timer_cb(void *opaque)
{
CryptoDevBackend *backend = (CryptoDevBackend *)opaque;
CryptoDevBackendOpInfo *op_info, *tmpop;
int ret;
QTAILQ_FOREACH_SAFE(op_info, &backend->opinfos, next, tmpop) {
QTAILQ_REMOVE(&backend->opinfos, op_info, next);
ret = cryptodev_backend_account(backend, op_info);
if (ret < 0) {
op_info->cb(op_info->opaque, ret);
continue;
}
throttle_account(&backend->ts, true, ret);
cryptodev_backend_operation(backend, op_info);
if (throttle_enabled(&backend->tc) &&
throttle_schedule_timer(&backend->ts, &backend->tt, true)) {
break;
}
}
}
int cryptodev_backend_crypto_operation(
CryptoDevBackend *backend,
CryptoDevBackendOpInfo *op_info)
{
int ret;
if (!throttle_enabled(&backend->tc)) {
goto do_account;
}
if (throttle_schedule_timer(&backend->ts, &backend->tt, true) ||
!QTAILQ_EMPTY(&backend->opinfos)) {
QTAILQ_INSERT_TAIL(&backend->opinfos, op_info, next);
return 0;
}
do_account:
ret = cryptodev_backend_account(backend, op_info);
if (ret < 0) {
return ret;
}
throttle_account(&backend->ts, true, ret);
return cryptodev_backend_operation(backend, op_info);
}
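The throttle-bps and throttle-ops properties registered later in this patch feed this path. A hedged command-line sketch with arbitrary example values: cap the builtin backend at 1000 crypto ops/s and 4 MiB/s.
qemu-system-x86_64 \
  -object cryptodev-backend-builtin,id=cryptodev0,throttle-ops=1000,throttle-bps=4194304 \
  -device virtio-crypto-pci,cryptodev=cryptodev0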
static void
@ -169,15 +306,111 @@ cryptodev_backend_set_queues(Object *obj, Visitor *v, const char *name,
backend->conf.peers.queues = value;
}
static void cryptodev_backend_set_throttle(CryptoDevBackend *backend, int field,
uint64_t value, Error **errp)
{
uint64_t orig = backend->tc.buckets[field].avg;
bool enabled = throttle_enabled(&backend->tc);
if (orig == value) {
return;
}
backend->tc.buckets[field].avg = value;
if (!throttle_enabled(&backend->tc)) {
throttle_timers_destroy(&backend->tt);
cryptodev_backend_throttle_timer_cb(backend); /* drain opinfos */
return;
}
if (!throttle_is_valid(&backend->tc, errp)) {
backend->tc.buckets[field].avg = orig; /* revert change */
return;
}
if (!enabled) {
throttle_init(&backend->ts);
throttle_timers_init(&backend->tt, qemu_get_aio_context(),
QEMU_CLOCK_REALTIME,
cryptodev_backend_throttle_timer_cb, /* FIXME */
cryptodev_backend_throttle_timer_cb, backend);
}
throttle_config(&backend->ts, QEMU_CLOCK_REALTIME, &backend->tc);
}
static void cryptodev_backend_get_bps(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
uint64_t value = backend->tc.buckets[THROTTLE_BPS_TOTAL].avg;
visit_type_uint64(v, name, &value, errp);
}
static void cryptodev_backend_set_bps(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
uint64_t value;
if (!visit_type_uint64(v, name, &value, errp)) {
return;
}
cryptodev_backend_set_throttle(backend, THROTTLE_BPS_TOTAL, value, errp);
}
static void cryptodev_backend_get_ops(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
uint64_t value = backend->tc.buckets[THROTTLE_OPS_TOTAL].avg;
visit_type_uint64(v, name, &value, errp);
}
static void cryptodev_backend_set_ops(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
uint64_t value;
if (!visit_type_uint64(v, name, &value, errp)) {
return;
}
cryptodev_backend_set_throttle(backend, THROTTLE_OPS_TOTAL, value, errp);
}
static void
cryptodev_backend_complete(UserCreatable *uc, Error **errp)
{
CryptoDevBackend *backend = CRYPTODEV_BACKEND(uc);
CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_GET_CLASS(uc);
uint32_t services;
uint64_t value;
QTAILQ_INIT(&backend->opinfos);
value = backend->tc.buckets[THROTTLE_OPS_TOTAL].avg;
cryptodev_backend_set_throttle(backend, THROTTLE_OPS_TOTAL, value, errp);
value = backend->tc.buckets[THROTTLE_BPS_TOTAL].avg;
cryptodev_backend_set_throttle(backend, THROTTLE_BPS_TOTAL, value, errp);
if (bc->init) {
bc->init(backend, errp);
}
services = backend->conf.crypto_services;
if (services & (1 << QCRYPTODEV_BACKEND_SERVICE_CIPHER)) {
backend->sym_stat = g_new0(CryptodevBackendSymStat, 1);
}
if (services & (1 << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER)) {
backend->asym_stat = g_new0(CryptodevBackendAsymStat, 1);
}
}
void cryptodev_backend_set_used(CryptoDevBackend *backend, bool used)
@ -208,8 +441,12 @@ cryptodev_backend_can_be_deleted(UserCreatable *uc)
static void cryptodev_backend_instance_init(Object *obj)
{
CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
/* Initialize devices' queues property to 1 */
object_property_set_int(obj, "queues", 1, NULL);
throttle_config_init(&backend->tc);
}
static void cryptodev_backend_finalize(Object *obj)
@ -217,6 +454,137 @@ static void cryptodev_backend_finalize(Object *obj)
CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
cryptodev_backend_cleanup(backend, NULL);
if (throttle_enabled(&backend->tc)) {
throttle_timers_destroy(&backend->tt);
}
}
static StatsList *cryptodev_backend_stats_add(const char *name, int64_t *val,
StatsList *stats_list)
{
Stats *stats = g_new0(Stats, 1);
stats->name = g_strdup(name);
stats->value = g_new0(StatsValue, 1);
stats->value->type = QTYPE_QNUM;
stats->value->u.scalar = *val;
QAPI_LIST_PREPEND(stats_list, stats);
return stats_list;
}
static int cryptodev_backend_stats_query(Object *obj, void *data)
{
StatsArgs *stats_args = data;
StatsResultList **stats_results = stats_args->result.stats;
StatsList *stats_list = NULL;
StatsResult *entry;
CryptoDevBackend *backend;
CryptodevBackendSymStat *sym_stat;
CryptodevBackendAsymStat *asym_stat;
if (!object_dynamic_cast(obj, TYPE_CRYPTODEV_BACKEND)) {
return 0;
}
backend = CRYPTODEV_BACKEND(obj);
sym_stat = backend->sym_stat;
if (sym_stat) {
stats_list = cryptodev_backend_stats_add(SYM_ENCRYPT_OPS_STR,
&sym_stat->encrypt_ops, stats_list);
stats_list = cryptodev_backend_stats_add(SYM_DECRYPT_OPS_STR,
&sym_stat->decrypt_ops, stats_list);
stats_list = cryptodev_backend_stats_add(SYM_ENCRYPT_BYTES_STR,
&sym_stat->encrypt_bytes, stats_list);
stats_list = cryptodev_backend_stats_add(SYM_DECRYPT_BYTES_STR,
&sym_stat->decrypt_bytes, stats_list);
}
asym_stat = backend->asym_stat;
if (asym_stat) {
stats_list = cryptodev_backend_stats_add(ASYM_ENCRYPT_OPS_STR,
&asym_stat->encrypt_ops, stats_list);
stats_list = cryptodev_backend_stats_add(ASYM_DECRYPT_OPS_STR,
&asym_stat->decrypt_ops, stats_list);
stats_list = cryptodev_backend_stats_add(ASYM_SIGN_OPS_STR,
&asym_stat->sign_ops, stats_list);
stats_list = cryptodev_backend_stats_add(ASYM_VERIFY_OPS_STR,
&asym_stat->verify_ops, stats_list);
stats_list = cryptodev_backend_stats_add(ASYM_ENCRYPT_BYTES_STR,
&asym_stat->encrypt_bytes, stats_list);
stats_list = cryptodev_backend_stats_add(ASYM_DECRYPT_BYTES_STR,
&asym_stat->decrypt_bytes, stats_list);
stats_list = cryptodev_backend_stats_add(ASYM_SIGN_BYTES_STR,
&asym_stat->sign_bytes, stats_list);
stats_list = cryptodev_backend_stats_add(ASYM_VERIFY_BYTES_STR,
&asym_stat->verify_bytes, stats_list);
}
entry = g_new0(StatsResult, 1);
entry->provider = STATS_PROVIDER_CRYPTODEV;
entry->qom_path = g_strdup(object_get_canonical_path(obj));
entry->stats = stats_list;
QAPI_LIST_PREPEND(*stats_results, entry);
return 0;
}
static void cryptodev_backend_stats_cb(StatsResultList **result,
StatsTarget target,
strList *names, strList *targets,
Error **errp)
{
switch (target) {
case STATS_TARGET_CRYPTODEV:
{
Object *objs = container_get(object_get_root(), "/objects");
StatsArgs stats_args;
stats_args.result.stats = result;
stats_args.names = names;
stats_args.errp = errp;
object_child_foreach(objs, cryptodev_backend_stats_query, &stats_args);
break;
}
default:
break;
}
}
static StatsSchemaValueList *cryptodev_backend_schemas_add(const char *name,
StatsSchemaValueList *list)
{
StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
schema_entry->value = g_new0(StatsSchemaValue, 1);
schema_entry->value->type = STATS_TYPE_CUMULATIVE;
schema_entry->value->name = g_strdup(name);
schema_entry->next = list;
return schema_entry;
}
static void cryptodev_backend_schemas_cb(StatsSchemaList **result,
Error **errp)
{
StatsSchemaValueList *stats_list = NULL;
const char *sym_stats[] = { SYM_ENCRYPT_OPS_STR, SYM_DECRYPT_OPS_STR,
SYM_ENCRYPT_BYTES_STR, SYM_DECRYPT_BYTES_STR };
const char *asym_stats[] = { ASYM_ENCRYPT_OPS_STR, ASYM_DECRYPT_OPS_STR,
ASYM_SIGN_OPS_STR, ASYM_VERIFY_OPS_STR,
ASYM_ENCRYPT_BYTES_STR, ASYM_DECRYPT_BYTES_STR,
ASYM_SIGN_BYTES_STR, ASYM_VERIFY_BYTES_STR };
for (int i = 0; i < ARRAY_SIZE(sym_stats); i++) {
stats_list = cryptodev_backend_schemas_add(sym_stats[i], stats_list);
}
for (int i = 0; i < ARRAY_SIZE(asym_stats); i++) {
stats_list = cryptodev_backend_schemas_add(asym_stats[i], stats_list);
}
add_stats_schema(result, STATS_PROVIDER_CRYPTODEV, STATS_TARGET_CRYPTODEV,
stats_list);
}
static void
@ -232,6 +600,17 @@ cryptodev_backend_class_init(ObjectClass *oc, void *data)
cryptodev_backend_get_queues,
cryptodev_backend_set_queues,
NULL, NULL);
object_class_property_add(oc, "throttle-bps", "uint64",
cryptodev_backend_get_bps,
cryptodev_backend_set_bps,
NULL, NULL);
object_class_property_add(oc, "throttle-ops", "uint64",
cryptodev_backend_get_ops,
cryptodev_backend_set_ops,
NULL, NULL);
add_stats_callbacks(STATS_PROVIDER_CRYPTODEV, cryptodev_backend_stats_cb,
cryptodev_backend_schemas_cb);
}
static const TypeInfo cryptodev_backend_info = {

View File

@ -27,6 +27,7 @@ struct HostMemoryBackendFile {
char *mem_path;
uint64_t align;
uint64_t offset;
bool discard_data;
bool is_pmem;
bool readonly;
@ -58,7 +59,8 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
ram_flags |= fb->is_pmem ? RAM_PMEM : 0;
memory_region_init_ram_from_file(&backend->mr, OBJECT(backend), name,
backend->size, fb->align, ram_flags,
fb->mem_path, fb->readonly, errp);
fb->mem_path, fb->offset, fb->readonly,
errp);
g_free(name);
#endif
}
@ -125,6 +127,36 @@ static void file_memory_backend_set_align(Object *o, Visitor *v,
fb->align = val;
}
static void file_memory_backend_get_offset(Object *o, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);
uint64_t val = fb->offset;
visit_type_size(v, name, &val, errp);
}
static void file_memory_backend_set_offset(Object *o, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
HostMemoryBackend *backend = MEMORY_BACKEND(o);
HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);
uint64_t val;
if (host_memory_backend_mr_inited(backend)) {
error_setg(errp, "cannot change property '%s' of %s", name,
object_get_typename(o));
return;
}
if (!visit_type_size(v, name, &val, errp)) {
return;
}
fb->offset = val;
}
#ifdef CONFIG_LIBPMEM
static bool file_memory_backend_get_pmem(Object *o, Error **errp)
{
@ -197,6 +229,12 @@ file_backend_class_init(ObjectClass *oc, void *data)
file_memory_backend_get_align,
file_memory_backend_set_align,
NULL, NULL);
object_class_property_add(oc, "offset", "int",
file_memory_backend_get_offset,
file_memory_backend_set_offset,
NULL, NULL);
object_class_property_set_description(oc, "offset",
"Offset into the target file (ex: 1G)");
#ifdef CONFIG_LIBPMEM
object_class_property_add_bool(oc, "pmem",
file_memory_backend_get_pmem, file_memory_backend_set_pmem);

View File

@ -1,5 +1,6 @@
softmmu_ss.add([files(
'cryptodev-builtin.c',
'cryptodev-hmp-cmds.c',
'cryptodev.c',
'hostmem-ram.c',
'hostmem.c',

View File

@ -100,8 +100,6 @@ bool tpm_backend_had_startup_error(TPMBackend *s)
void tpm_backend_deliver_request(TPMBackend *s, TPMBackendCmd *cmd)
{
ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
if (s->cmd != NULL) {
error_report("There is a TPM request pending");
return;
@ -109,7 +107,7 @@ void tpm_backend_deliver_request(TPMBackend *s, TPMBackendCmd *cmd)
s->cmd = cmd;
object_ref(OBJECT(s));
thread_pool_submit_aio(pool, tpm_backend_worker_thread, s,
thread_pool_submit_aio(tpm_backend_worker_thread, s,
tpm_backend_request_completed, s);
}

View File

@ -573,13 +573,13 @@ static int tpm_emulator_prepare_data_fd(TPMEmulator *tpm_emu)
goto err_exit;
}
closesocket(fds[1]);
close(fds[1]);
return 0;
err_exit:
closesocket(fds[0]);
closesocket(fds[1]);
close(fds[0]);
close(fds[1]);
return -1;
}

View File

@ -20,12 +20,6 @@
#include "io/channel-command.h"
#include "hw/virtio/virtio-bus.h"
static bool
ioeventfd_enabled(void)
{
return kvm_enabled() && kvm_eventfds_enabled();
}
int
vhost_user_backend_dev_init(VhostUserBackend *b, VirtIODevice *vdev,
unsigned nvqs, Error **errp)
@ -34,11 +28,6 @@ vhost_user_backend_dev_init(VhostUserBackend *b, VirtIODevice *vdev,
assert(!b->vdev && vdev);
if (!ioeventfd_enabled()) {
error_setg(errp, "vhost initialization failed: requires kvm");
return -1;
}
if (!vhost_user_init(&b->vhost_user, &b->chr, errp)) {
return -1;
}

block.c
View File

@ -533,7 +533,6 @@ int coroutine_fn bdrv_co_create(BlockDriver *drv, const char *filename,
int ret;
GLOBAL_STATE_CODE();
ERRP_GUARD();
assert_bdrv_graph_readable();
if (!drv->bdrv_co_create_opts) {
error_setg(errp, "Driver '%s' does not support image creation",
@ -680,7 +679,7 @@ int coroutine_fn bdrv_co_create_opts_simple(BlockDriver *drv,
ret = 0;
out:
blk_unref(blk);
blk_co_unref(blk);
return ret;
}
@ -1610,10 +1609,11 @@ out:
* bdrv_refresh_total_sectors() which polls when called from non-coroutine
* context.
*/
static int bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv,
const char *node_name, QDict *options,
int open_flags, Error **errp)
static int no_coroutine_fn GRAPH_UNLOCKED
bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name,
QDict *options, int open_flags, Error **errp)
{
AioContext *ctx;
Error *local_err = NULL;
int i, ret;
GLOBAL_STATE_CODE();
@ -1661,13 +1661,22 @@ static int bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv,
bs->supported_read_flags |= BDRV_REQ_REGISTERED_BUF;
bs->supported_write_flags |= BDRV_REQ_REGISTERED_BUF;
/* Get the context after .bdrv_open, it can change the context */
ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
ret = bdrv_refresh_total_sectors(bs, bs->total_sectors);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not refresh total sector count");
aio_context_release(ctx);
return ret;
}
bdrv_graph_rdlock_main_loop();
bdrv_refresh_limits(bs, NULL, &local_err);
bdrv_graph_rdunlock_main_loop();
aio_context_release(ctx);
if (local_err) {
error_propagate(errp, local_err);
return -EINVAL;
@ -3419,7 +3428,9 @@ static int bdrv_set_file_or_backing_noperm(BlockDriverState *parent_bs,
}
out:
bdrv_graph_rdlock_main_loop();
bdrv_refresh_limits(parent_bs, tran, NULL);
bdrv_graph_rdunlock_main_loop();
return 0;
}
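
bdrv_refresh_limits() is now called under the block-graph read lock. Two equivalent ways of taking that lock from main-loop code, both of which appear in this merge (the wrapper functions are hypothetical):

#include "qemu/osdep.h"
#include "block/block_int.h"

static void example_manual(BlockDriverState *bs)
{
    bdrv_graph_rdlock_main_loop();
    bdrv_refresh_limits(bs, NULL, NULL);
    bdrv_graph_rdunlock_main_loop();
}

static void example_guard(BlockDriverState *bs)
{
    GRAPH_RDLOCK_GUARD_MAINLOOP();   /* released automatically at scope exit */
    bdrv_refresh_limits(bs, NULL, NULL);
}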
@ -3474,6 +3485,8 @@ int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
* itself, all options starting with "${bdref_key}." are considered part of the
* BlockdevRef.
*
* The caller must hold the main AioContext lock.
*
* TODO Can this be unified with bdrv_open_image()?
*/
int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
@ -3640,6 +3653,9 @@ done:
* BlockdevRef.
*
* The BlockdevRef will be removed from the options QDict.
*
* @parent can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
BdrvChild *bdrv_open_child(const char *filename,
QDict *options, const char *bdref_key,
@ -3664,6 +3680,9 @@ BdrvChild *bdrv_open_child(const char *filename,
/*
* Wrapper on bdrv_open_child() for most popular case: open primary child of bs.
*
* @parent can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
int bdrv_open_file_child(const char *filename,
QDict *options, const char *bdref_key,
@ -3806,9 +3825,7 @@ out:
* should be opened. If specified, neither options nor a filename may be given,
* nor can an existing BDS be reused (that is, *pbs has to be NULL).
*
* The caller must always hold @filename AioContext lock, because this
* function eventually calls bdrv_refresh_total_sectors() which polls
* when called from non-coroutine context.
* The caller must always hold the main AioContext lock.
*/
static BlockDriverState * no_coroutine_fn
bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
@ -4096,11 +4113,7 @@ close_and_fail:
return NULL;
}
/*
* The caller must always hold @filename AioContext lock, because this
* function eventually calls bdrv_refresh_total_sectors() which polls
* when called from non-coroutine context.
*/
/* The caller must always hold the main AioContext lock. */
BlockDriverState *bdrv_open(const char *filename, const char *reference,
QDict *options, int flags, Error **errp)
{
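
A caller-side sketch of the locking contract just documented for bdrv_open(): hold the main AioContext lock around the call. The wrapper is hypothetical; the pattern matches the blk_new_open() hunk later in this diff:

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "block/block.h"

static BlockDriverState *example_open(const char *filename, QDict *options,
                                      int flags, Error **errp)
{
    BlockDriverState *bs;

    aio_context_acquire(qemu_get_aio_context());
    bs = bdrv_open(filename, NULL, options, flags, errp);
    aio_context_release(qemu_get_aio_context());
    return bs;
}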
@ -4917,7 +4930,10 @@ static void bdrv_reopen_commit(BDRVReopenState *reopen_state)
qdict_del(bs->explicit_options, "backing");
qdict_del(bs->options, "backing");
bdrv_graph_rdlock_main_loop();
bdrv_refresh_limits(bs, NULL, NULL);
bdrv_graph_rdunlock_main_loop();
bdrv_refresh_total_sectors(bs, bs->total_sectors);
}
/*
@ -5315,7 +5331,9 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
out:
tran_finalize(tran, ret);
bdrv_graph_rdlock_main_loop();
bdrv_refresh_limits(bs_top, NULL, NULL);
bdrv_graph_rdunlock_main_loop();
if (new_context && old_context != new_context) {
aio_context_release(new_context);
@ -5381,12 +5399,17 @@ static void bdrv_delete(BlockDriverState *bs)
* empty set of options. The reference to the QDict belongs to the block layer
* after the call (even on failure), so if the caller intends to reuse the
* dictionary, it needs to use qobject_ref() before calling bdrv_open.
*
* The caller holds the AioContext lock for @bs. It must make sure that @bs
* stays in the same AioContext, i.e. @options must not refer to nodes in a
* different AioContext.
*/
BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options,
int flags, Error **errp)
{
ERRP_GUARD();
int ret;
AioContext *ctx = bdrv_get_aio_context(bs);
BlockDriverState *new_node_bs = NULL;
const char *drvname, *node_name;
BlockDriver *drv;
@ -5407,8 +5430,14 @@ BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options,
GLOBAL_STATE_CODE();
aio_context_release(ctx);
aio_context_acquire(qemu_get_aio_context());
new_node_bs = bdrv_new_open_driver_opts(drv, node_name, options, flags,
errp);
aio_context_release(qemu_get_aio_context());
aio_context_acquire(ctx);
assert(bdrv_get_aio_context(bs) == ctx);
options = NULL; /* bdrv_new_open_driver_opts() eats options */
if (!new_node_bs) {
error_prepend(errp, "Could not create node: ");
@ -5749,7 +5778,8 @@ exit:
* sums the size of all data-bearing children. (This excludes backing
* children.)
*/
static int64_t bdrv_sum_allocated_file_size(BlockDriverState *bs)
static int64_t coroutine_fn GRAPH_RDLOCK
bdrv_sum_allocated_file_size(BlockDriverState *bs)
{
BdrvChild *child;
int64_t child_size, sum = 0;
@ -5777,6 +5807,7 @@ int64_t coroutine_fn bdrv_co_get_allocated_file_size(BlockDriverState *bs)
{
BlockDriver *drv = bs->drv;
IO_CODE();
assert_bdrv_graph_readable();
if (!drv) {
return -ENOMEDIUM;
@ -5849,7 +5880,7 @@ int64_t coroutine_fn bdrv_co_nb_sectors(BlockDriverState *bs)
if (!drv)
return -ENOMEDIUM;
if (drv->has_variable_length) {
if (bs->bl.has_variable_length) {
int ret = bdrv_co_refresh_total_sectors(bs, bs->total_sectors);
if (ret < 0) {
return ret;
@ -5858,6 +5889,28 @@ int64_t coroutine_fn bdrv_co_nb_sectors(BlockDriverState *bs)
return bs->total_sectors;
}
/*
* This wrapper is written by hand because this function is in the hot I/O path,
* via blk_get_geometry.
*/
int64_t coroutine_mixed_fn bdrv_nb_sectors(BlockDriverState *bs)
{
BlockDriver *drv = bs->drv;
IO_CODE();
if (!drv)
return -ENOMEDIUM;
if (bs->bl.has_variable_length) {
int ret = bdrv_refresh_total_sectors(bs, bs->total_sectors);
if (ret < 0) {
return ret;
}
}
return bs->total_sectors;
}
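
bdrv_nb_sectors() is marked coroutine_mixed_fn: it may be called from both coroutine and non-coroutine context, and polls in the latter case. A hypothetical caller:

#include "qemu/osdep.h"
#include "block/block.h"

static void example_print_size(BlockDriverState *bs)
{
    int64_t n = bdrv_nb_sectors(bs);   /* polls when called outside a coroutine */

    if (n >= 0) {
        printf("%" PRId64 " sectors\n", n);
    }
}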
/**
* Return length in bytes on success, -errno on error.
* The length is always a multiple of BDRV_SECTOR_SIZE.
@ -5878,15 +5931,6 @@ int64_t coroutine_fn bdrv_co_getlength(BlockDriverState *bs)
return ret * BDRV_SECTOR_SIZE;
}
/* return 0 as number of sectors if no device present or error */
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
{
int64_t nb_sectors = bdrv_nb_sectors(bs);
IO_CODE();
*nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors;
}
bool bdrv_is_sg(BlockDriverState *bs)
{
IO_CODE();
@ -6333,6 +6377,8 @@ int coroutine_fn bdrv_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
int ret;
BlockDriver *drv = bs->drv;
IO_CODE();
assert_bdrv_graph_readable();
/* if bs->drv == NULL, bs is closed, so there's nothing to do here */
if (!drv) {
return -ENOMEDIUM;
@ -6381,6 +6427,8 @@ BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs)
void coroutine_fn bdrv_co_debug_event(BlockDriverState *bs, BlkdebugEvent event)
{
IO_CODE();
assert_bdrv_graph_readable();
if (!bs || !bs->drv || !bs->drv->bdrv_co_debug_event) {
return;
}
@ -7015,6 +7063,8 @@ void bdrv_img_create(const char *filename, const char *fmt,
return;
}
aio_context_acquire(qemu_get_aio_context());
/* Create parameter list */
create_opts = qemu_opts_append(create_opts, drv->create_opts);
create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
@ -7164,6 +7214,7 @@ out:
qemu_opts_del(opts);
qemu_opts_free(create_opts);
error_propagate(errp, local_err);
aio_context_release(qemu_get_aio_context());
}
AioContext *bdrv_get_aio_context(BlockDriverState *bs)
@ -7254,9 +7305,6 @@ static void bdrv_detach_aio_context(BlockDriverState *bs)
bs->drv->bdrv_detach_aio_context(bs);
}
if (bs->quiesce_counter) {
aio_enable_external(bs->aio_context);
}
bs->aio_context = NULL;
}
@ -7266,10 +7314,6 @@ static void bdrv_attach_aio_context(BlockDriverState *bs,
BdrvAioNotifier *ban, *ban_tmp;
GLOBAL_STATE_CODE();
if (bs->quiesce_counter) {
aio_disable_external(new_context);
}
bs->aio_context = new_context;
if (bs->drv && bs->drv->bdrv_attach_aio_context) {
@ -7953,6 +7997,25 @@ void bdrv_add_child(BlockDriverState *parent_bs, BlockDriverState *child_bs,
return;
}
/*
* Non-zoned block drivers do not follow zoned storage constraints
* (i.e. sequential writes to zones). Refuse to mix zoned and non-zoned
* drivers in a graph.
*/
if (!parent_bs->drv->supports_zoned_children &&
child_bs->bl.zoned == BLK_Z_HM) {
/*
* The host-aware model allows zoned storage constraints and random
* writes, so mixing host-aware and non-zoned drivers is allowed: a
* host-aware device can be used as a regular device.
*/
error_setg(errp, "Cannot add a %s child to a parent that does %s zoned children",
child_bs->bl.zoned == BLK_Z_HM ? "zoned" : "non-zoned",
parent_bs->drv->supports_zoned_children ?
"support" : "not support");
return;
}
if (!QLIST_EMPTY(&child_bs->parents)) {
error_setg(errp, "The node %s already has a parent",
child_bs->node_name);
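
Only drivers that set the new supports_zoned_children flag may take a host-managed (BLK_Z_HM) zoned child. A hypothetical driver declaration opting in:

#include "qemu/osdep.h"
#include "block/block_int.h"

static BlockDriver bdrv_example_zoned = {
    .format_name             = "example-zoned",   /* hypothetical driver */
    .supports_zoned_children = true,
    /* remaining callbacks elided */
};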

View File

@ -46,6 +46,7 @@ static int coroutine_fn blockdev_amend_run(Job *job, Error **errp)
{
BlockdevAmendJob *s = container_of(job, BlockdevAmendJob, common);
int ret;
GRAPH_RDLOCK_GUARD();
job_progress_set_remaining(&s->common, 1);
ret = s->bs->drv->bdrv_co_amend(s->bs, s->opts, s->force, errp);
@ -54,7 +55,8 @@ static int coroutine_fn blockdev_amend_run(Job *job, Error **errp)
return ret;
}
static int blockdev_amend_pre_run(BlockdevAmendJob *s, Error **errp)
static int GRAPH_RDLOCK
blockdev_amend_pre_run(BlockdevAmendJob *s, Error **errp)
{
if (s->bs->drv->bdrv_amend_pre_run) {
return s->bs->drv->bdrv_amend_pre_run(s->bs, errp);
@ -67,9 +69,11 @@ static void blockdev_amend_free(Job *job)
{
BlockdevAmendJob *s = container_of(job, BlockdevAmendJob, common);
bdrv_graph_rdlock_main_loop();
if (s->bs->drv->bdrv_amend_clean) {
s->bs->drv->bdrv_amend_clean(s->bs);
}
bdrv_graph_rdunlock_main_loop();
bdrv_unref(s->bs);
}
@ -93,6 +97,8 @@ void qmp_x_blockdev_amend(const char *job_id,
BlockDriver *drv = bdrv_find_format(fmt);
BlockDriverState *bs;
GRAPH_RDLOCK_GUARD_MAINLOOP();
bs = bdrv_lookup_bs(NULL, node_name, errp);
if (!bs) {
return;

View File

@ -583,8 +583,8 @@ out:
return ret;
}
static int rule_check(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
BlkdebugIOType iotype)
static int coroutine_fn rule_check(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, BlkdebugIOType iotype)
{
BDRVBlkdebugState *s = bs->opaque;
BlkdebugRule *rule = NULL;

View File

@ -306,23 +306,18 @@ static void blkio_attach_aio_context(BlockDriverState *bs,
{
BDRVBlkioState *s = bs->opaque;
aio_set_fd_handler(new_context,
s->completion_fd,
false,
blkio_completion_fd_read,
NULL,
aio_set_fd_handler(new_context, s->completion_fd,
blkio_completion_fd_read, NULL,
blkio_completion_fd_poll,
blkio_completion_fd_poll_ready,
bs);
blkio_completion_fd_poll_ready, bs);
}
static void blkio_detach_aio_context(BlockDriverState *bs)
{
BDRVBlkioState *s = bs->opaque;
aio_set_fd_handler(bdrv_get_aio_context(bs),
s->completion_fd,
false, NULL, NULL, NULL, NULL, NULL);
aio_set_fd_handler(bdrv_get_aio_context(bs), s->completion_fd, NULL, NULL,
NULL, NULL, NULL);
}
/* Call with s->blkio_lock held to submit I/O after enqueuing a new request */
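
Both calls above reflect the removal of the is_external flag from aio_set_fd_handler(); the remaining parameters keep their order (read, write, poll and poll-ready handlers, then the opaque pointer). A sketch with hypothetical handler names:

#include "qemu/osdep.h"
#include "block/aio.h"

static void example_register(AioContext *ctx, int fd, IOHandler *my_read_cb,
                             void *opaque)
{
    /* register a read handler -- no is_external argument any more */
    aio_set_fd_handler(ctx, fd, my_read_cb, NULL, NULL, NULL, opaque);
}

static void example_unregister(AioContext *ctx, int fd)
{
    aio_set_fd_handler(ctx, fd, NULL, NULL, NULL, NULL, NULL);
}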

View File

@ -265,7 +265,8 @@ static int coroutine_fn GRAPH_RDLOCK blkverify_co_flush(BlockDriverState *bs)
return bdrv_co_flush(s->test_file->bs);
}
static bool blkverify_recurse_can_replace(BlockDriverState *bs,
static bool GRAPH_RDLOCK
blkverify_recurse_can_replace(BlockDriverState *bs,
BlockDriverState *to_replace)
{
BDRVBlkverifyState *s = bs->opaque;

View File

@ -80,9 +80,10 @@ struct BlockBackend {
NotifierList remove_bs_notifiers, insert_bs_notifiers;
QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;
int quiesce_counter;
int quiesce_counter; /* atomic: written under BQL, read by other threads */
QemuMutex queued_requests_lock; /* protects queued_requests */
CoQueue queued_requests;
bool disable_request_queuing;
bool disable_request_queuing; /* atomic */
VMChangeStateEntry *vmsh;
bool force_allow_inactivate;
@ -368,6 +369,7 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
block_acct_init(&blk->stats);
qemu_mutex_init(&blk->queued_requests_lock);
qemu_co_queue_init(&blk->queued_requests);
notifier_list_init(&blk->remove_bs_notifiers);
notifier_list_init(&blk->insert_bs_notifiers);
@ -387,6 +389,8 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
* Both sets of permissions can be changed later using blk_set_perm().
*
* Return the new BlockBackend on success, null on failure.
*
* Callers must hold the AioContext lock of @bs.
*/
BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm,
uint64_t shared_perm, Error **errp)
@ -404,11 +408,15 @@ BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm,
/*
* Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
* The new BlockBackend is in the main AioContext.
* By default, the new BlockBackend is in the main AioContext, but if the
* parameters connect it with any existing node in a different AioContext, it
* may end up there instead.
*
* Just as with bdrv_open(), after having called this function the reference to
* @options belongs to the block layer (even on failure).
*
* Called without holding an AioContext lock.
*
* TODO: Remove @filename and @flags; it should be possible to specify a whole
* BDS tree just by specifying the @options QDict (or @reference,
* alternatively). At the time of adding this function, this is not possible,
@ -420,6 +428,7 @@ BlockBackend *blk_new_open(const char *filename, const char *reference,
{
BlockBackend *blk;
BlockDriverState *bs;
AioContext *ctx;
uint64_t perm = 0;
uint64_t shared = BLK_PERM_ALL;
@ -449,16 +458,24 @@ BlockBackend *blk_new_open(const char *filename, const char *reference,
shared = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED;
}
blk = blk_new(qemu_get_aio_context(), perm, shared);
aio_context_acquire(qemu_get_aio_context());
bs = bdrv_open(filename, reference, options, flags, errp);
aio_context_release(qemu_get_aio_context());
if (!bs) {
blk_unref(blk);
return NULL;
}
blk->root = bdrv_root_attach_child(bs, "root", &child_root,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
perm, shared, blk, errp);
/* bdrv_open() could have moved bs to a different AioContext */
ctx = bdrv_get_aio_context(bs);
blk = blk_new(bdrv_get_aio_context(bs), perm, shared);
blk->perm = perm;
blk->shared_perm = shared;
aio_context_acquire(ctx);
blk_insert_bs(blk, bs, errp);
bdrv_unref(bs);
aio_context_release(ctx);
if (!blk->root) {
blk_unref(blk);
return NULL;
@ -485,6 +502,8 @@ static void blk_delete(BlockBackend *blk)
assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
assert(QLIST_EMPTY(&blk->aio_notifiers));
assert(qemu_co_queue_empty(&blk->queued_requests));
qemu_mutex_destroy(&blk->queued_requests_lock);
QTAILQ_REMOVE(&block_backends, blk, link);
drive_info_del(blk->legacy_dinfo);
block_acct_cleanup(&blk->stats);
@ -897,6 +916,8 @@ void blk_remove_bs(BlockBackend *blk)
/*
* Associates a new BlockDriverState with @blk.
*
* Callers must hold the AioContext lock of @bs.
*/
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
{
@ -1057,7 +1078,7 @@ void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
blk->dev_opaque = opaque;
/* Are we currently quiesced? Should we enforce this right now? */
if (blk->quiesce_counter && ops && ops->drained_begin) {
if (qatomic_read(&blk->quiesce_counter) && ops && ops->drained_begin) {
ops->drained_begin(opaque);
}
}
@ -1232,7 +1253,7 @@ void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow)
void blk_set_disable_request_queuing(BlockBackend *blk, bool disable)
{
IO_CODE();
blk->disable_request_queuing = disable;
qatomic_set(&blk->disable_request_queuing, disable);
}
static int coroutine_fn GRAPH_RDLOCK
@ -1266,15 +1287,30 @@ blk_check_byte_request(BlockBackend *blk, int64_t offset, int64_t bytes)
return 0;
}
/* Are we currently in a drained section? */
bool blk_in_drain(BlockBackend *blk)
{
GLOBAL_STATE_CODE(); /* change to IO_OR_GS_CODE(), if necessary */
return qatomic_read(&blk->quiesce_counter);
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
{
assert(blk->in_flight > 0);
if (blk->quiesce_counter && !blk->disable_request_queuing) {
if (qatomic_read(&blk->quiesce_counter) &&
!qatomic_read(&blk->disable_request_queuing)) {
/*
* Take lock before decrementing in flight counter so main loop thread
* waits for us to enqueue ourselves before it can leave the drained
* section.
*/
qemu_mutex_lock(&blk->queued_requests_lock);
blk_dec_in_flight(blk);
qemu_co_queue_wait(&blk->queued_requests, NULL);
qemu_co_queue_wait(&blk->queued_requests, &blk->queued_requests_lock);
blk_inc_in_flight(blk);
qemu_mutex_unlock(&blk->queued_requests_lock);
}
}
@ -1615,26 +1651,53 @@ int64_t coroutine_fn blk_co_getlength(BlockBackend *blk)
return bdrv_co_getlength(blk_bs(blk));
}
void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
IO_CODE();
if (!blk_bs(blk)) {
*nb_sectors_ptr = 0;
} else {
bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
}
}
int64_t coroutine_fn blk_co_nb_sectors(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
IO_CODE();
GRAPH_RDLOCK_GUARD();
if (!blk_co_is_available(blk)) {
if (!bs) {
return -ENOMEDIUM;
} else {
return bdrv_co_nb_sectors(bs);
}
}
return bdrv_co_nb_sectors(blk_bs(blk));
/*
* This wrapper is written by hand because this function is in the hot I/O path,
* via blk_get_geometry.
*/
int64_t coroutine_mixed_fn blk_nb_sectors(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
IO_CODE();
if (!bs) {
return -ENOMEDIUM;
} else {
return bdrv_nb_sectors(bs);
}
}
/* return 0 as number of sectors if no device present or error */
void coroutine_fn blk_co_get_geometry(BlockBackend *blk,
uint64_t *nb_sectors_ptr)
{
int64_t ret = blk_co_nb_sectors(blk);
*nb_sectors_ptr = ret < 0 ? 0 : ret;
}
/*
* This wrapper is written by hand because this function is in the hot I/O path.
*/
void coroutine_mixed_fn blk_get_geometry(BlockBackend *blk,
uint64_t *nb_sectors_ptr)
{
int64_t ret = blk_nb_sectors(blk);
*nb_sectors_ptr = ret < 0 ? 0 : ret;
}
BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
@ -1806,6 +1869,204 @@ int coroutine_fn blk_co_flush(BlockBackend *blk)
return ret;
}
static void coroutine_fn blk_aio_zone_report_entry(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
rwco->ret = blk_co_zone_report(rwco->blk, rwco->offset,
(unsigned int*)(uintptr_t)acb->bytes,
rwco->iobuf);
blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_zone_report(BlockBackend *blk, int64_t offset,
unsigned int *nr_zones,
BlockZoneDescriptor *zones,
BlockCompletionFunc *cb, void *opaque)
{
BlkAioEmAIOCB *acb;
Coroutine *co;
IO_CODE();
blk_inc_in_flight(blk);
acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
acb->rwco = (BlkRwCo) {
.blk = blk,
.offset = offset,
.iobuf = zones,
.ret = NOT_DONE,
};
acb->bytes = (int64_t)(uintptr_t)nr_zones;
acb->has_returned = false;
co = qemu_coroutine_create(blk_aio_zone_report_entry, acb);
aio_co_enter(blk_get_aio_context(blk), co);
acb->has_returned = true;
if (acb->rwco.ret != NOT_DONE) {
replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
blk_aio_complete_bh, acb);
}
return &acb->common;
}
static void coroutine_fn blk_aio_zone_mgmt_entry(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
rwco->ret = blk_co_zone_mgmt(rwco->blk,
(BlockZoneOp)(uintptr_t)rwco->iobuf,
rwco->offset, acb->bytes);
blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
int64_t offset, int64_t len,
BlockCompletionFunc *cb, void *opaque) {
BlkAioEmAIOCB *acb;
Coroutine *co;
IO_CODE();
blk_inc_in_flight(blk);
acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
acb->rwco = (BlkRwCo) {
.blk = blk,
.offset = offset,
.iobuf = (void *)(uintptr_t)op,
.ret = NOT_DONE,
};
acb->bytes = len;
acb->has_returned = false;
co = qemu_coroutine_create(blk_aio_zone_mgmt_entry, acb);
aio_co_enter(blk_get_aio_context(blk), co);
acb->has_returned = true;
if (acb->rwco.ret != NOT_DONE) {
replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
blk_aio_complete_bh, acb);
}
return &acb->common;
}
static void coroutine_fn blk_aio_zone_append_entry(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
rwco->ret = blk_co_zone_append(rwco->blk, (int64_t *)(uintptr_t)acb->bytes,
rwco->iobuf, rwco->flags);
blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_zone_append(BlockBackend *blk, int64_t *offset,
QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque) {
BlkAioEmAIOCB *acb;
Coroutine *co;
IO_CODE();
blk_inc_in_flight(blk);
acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
acb->rwco = (BlkRwCo) {
.blk = blk,
.ret = NOT_DONE,
.flags = flags,
.iobuf = qiov,
};
acb->bytes = (int64_t)(uintptr_t)offset;
acb->has_returned = false;
co = qemu_coroutine_create(blk_aio_zone_append_entry, acb);
aio_co_enter(blk_get_aio_context(blk), co);
acb->has_returned = true;
if (acb->rwco.ret != NOT_DONE) {
replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
blk_aio_complete_bh, acb);
}
return &acb->common;
}
/*
* Send a zone_report command.
* offset is a byte offset from the start of the device; no alignment is
* required for it.
* nr_zones is an input (the maximum number of zones to report) and an
* output (the number of zones actually reported).
*/
int coroutine_fn blk_co_zone_report(BlockBackend *blk, int64_t offset,
unsigned int *nr_zones,
BlockZoneDescriptor *zones)
{
int ret;
IO_CODE();
blk_inc_in_flight(blk); /* increase before waiting */
blk_wait_while_drained(blk);
GRAPH_RDLOCK_GUARD();
if (!blk_is_available(blk)) {
blk_dec_in_flight(blk);
return -ENOMEDIUM;
}
ret = bdrv_co_zone_report(blk_bs(blk), offset, nr_zones, zones);
blk_dec_in_flight(blk);
return ret;
}
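
A hypothetical coroutine caller of the new report API, showing the in/out use of nr_zones described above:

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"

static int coroutine_fn example_report_zones(BlockBackend *blk)
{
    BlockZoneDescriptor zones[16];
    unsigned int nr_zones = ARRAY_SIZE(zones);
    int ret = blk_co_zone_report(blk, 0, &nr_zones, zones);

    if (ret < 0) {
        return ret;
    }
    /* nr_zones now holds the number of descriptors actually filled in */
    return 0;
}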
/*
* Send a zone_management command.
* op is the zone operation;
* offset is the byte offset from the start of the zoned device;
* len is the maximum number of bytes the command should operate on. It
* should be aligned with the device zone size.
*/
int coroutine_fn blk_co_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
int64_t offset, int64_t len)
{
int ret;
IO_CODE();
blk_inc_in_flight(blk);
blk_wait_while_drained(blk);
GRAPH_RDLOCK_GUARD();
ret = blk_check_byte_request(blk, offset, len);
if (ret < 0) {
blk_dec_in_flight(blk);
return ret;
}
ret = bdrv_co_zone_mgmt(blk_bs(blk), op, offset, len);
blk_dec_in_flight(blk);
return ret;
}
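
A matching management call, e.g. resetting the zone at the start of the device. This is a sketch: BLK_ZO_RESET is one of the BlockZoneOp values introduced by this series, and zone_size must come from the device:

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"

static int coroutine_fn example_reset_first_zone(BlockBackend *blk,
                                                 int64_t zone_size)
{
    /* len covers one whole zone, satisfying the alignment note above */
    return blk_co_zone_mgmt(blk, BLK_ZO_RESET, 0, zone_size);
}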
/*
* Send a zone_append command.
*/
int coroutine_fn blk_co_zone_append(BlockBackend *blk, int64_t *offset,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
int ret;
IO_CODE();
blk_inc_in_flight(blk);
blk_wait_while_drained(blk);
GRAPH_RDLOCK_GUARD();
if (!blk_is_available(blk)) {
blk_dec_in_flight(blk);
return -ENOMEDIUM;
}
ret = bdrv_co_zone_append(blk_bs(blk), offset, qiov, flags);
blk_dec_in_flight(blk);
return ret;
}
void blk_drain(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
@ -1818,7 +2079,7 @@ void blk_drain(BlockBackend *blk)
/* We may have -ENOMEDIUM completions in flight */
AIO_WAIT_WHILE(blk_get_aio_context(blk),
qatomic_mb_read(&blk->in_flight) > 0);
qatomic_read(&blk->in_flight) > 0);
if (bs) {
bdrv_drained_end(bs);
@ -1835,14 +2096,8 @@ void blk_drain_all(void)
bdrv_drain_all_begin();
while ((blk = blk_all_next(blk)) != NULL) {
AioContext *ctx = blk_get_aio_context(blk);
aio_context_acquire(ctx);
/* We may have -ENOMEDIUM completions in flight */
AIO_WAIT_WHILE(ctx, qatomic_mb_read(&blk->in_flight) > 0);
aio_context_release(ctx);
AIO_WAIT_WHILE_UNLOCKED(NULL, qatomic_read(&blk->in_flight) > 0);
}
bdrv_drain_all_end();
@ -1991,8 +2246,16 @@ void blk_activate(BlockBackend *blk, Error **errp)
return;
}
/*
* Migration code can call this function in coroutine context, so the
* coroutine variant must be used in that case.
*/
if (qemu_in_coroutine()) {
bdrv_co_activate(bs, errp);
} else {
bdrv_activate(bs, errp);
}
}
bool coroutine_fn blk_co_is_inserted(BlockBackend *blk)
{
@ -2155,9 +2418,14 @@ void blk_op_unblock_all(BlockBackend *blk, Error *reason)
AioContext *blk_get_aio_context(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
BlockDriverState *bs;
IO_CODE();
if (!blk) {
return qemu_get_aio_context();
}
bs = blk_bs(blk);
if (bs) {
AioContext *ctx = bdrv_get_aio_context(blk_bs(blk));
assert(ctx == blk->ctx);
@ -2172,52 +2440,31 @@ static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
return blk_get_aio_context(blk_acb->blk);
}
static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
bool update_root_node, Error **errp)
{
BlockDriverState *bs = blk_bs(blk);
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
int ret;
if (bs) {
bdrv_ref(bs);
if (update_root_node) {
/*
* update_root_node MUST be false for blk_root_set_aio_ctx_commit(),
* as we are already in the commit function of a transaction.
*/
ret = bdrv_try_change_aio_context(bs, new_context, blk->root, errp);
if (ret < 0) {
bdrv_unref(bs);
return ret;
}
}
/*
* Make blk->ctx consistent with the root node before we invoke any
* other operations like drain that might inquire blk->ctx
*/
blk->ctx = new_context;
if (tgm->throttle_state) {
bdrv_drained_begin(bs);
throttle_group_detach_aio_context(tgm);
throttle_group_attach_aio_context(tgm, new_context);
bdrv_drained_end(bs);
}
bdrv_unref(bs);
} else {
blk->ctx = new_context;
}
return 0;
}
int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
Error **errp)
{
bool old_allow_change;
BlockDriverState *bs = blk_bs(blk);
int ret;
GLOBAL_STATE_CODE();
return blk_do_set_aio_context(blk, new_context, true, errp);
if (!bs) {
blk->ctx = new_context;
return 0;
}
bdrv_ref(bs);
old_allow_change = blk->allow_aio_context_change;
blk->allow_aio_context_change = true;
ret = bdrv_try_change_aio_context(bs, new_context, NULL, errp);
blk->allow_aio_context_change = old_allow_change;
bdrv_unref(bs);
return ret;
}
typedef struct BdrvStateBlkRootContext {
@ -2229,8 +2476,14 @@ static void blk_root_set_aio_ctx_commit(void *opaque)
{
BdrvStateBlkRootContext *s = opaque;
BlockBackend *blk = s->blk;
AioContext *new_context = s->new_ctx;
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
blk_do_set_aio_context(blk, s->new_ctx, false, &error_abort);
blk->ctx = new_context;
if (tgm->throttle_state) {
throttle_group_detach_aio_context(tgm);
throttle_group_attach_aio_context(tgm, new_context);
}
}
static TransactionActionDrv set_blk_root_context = {
@ -2568,7 +2821,7 @@ static void blk_root_drained_begin(BdrvChild *child)
BlockBackend *blk = child->opaque;
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
if (++blk->quiesce_counter == 1) {
if (qatomic_fetch_inc(&blk->quiesce_counter) == 0) {
if (blk->dev_ops && blk->dev_ops->drained_begin) {
blk->dev_ops->drained_begin(blk->dev_opaque);
}
@ -2586,7 +2839,7 @@ static bool blk_root_drained_poll(BdrvChild *child)
{
BlockBackend *blk = child->opaque;
bool busy = false;
assert(blk->quiesce_counter);
assert(qatomic_read(&blk->quiesce_counter));
if (blk->dev_ops && blk->dev_ops->drained_poll) {
busy = blk->dev_ops->drained_poll(blk->dev_opaque);
@ -2597,18 +2850,21 @@ static bool blk_root_drained_poll(BdrvChild *child)
static void blk_root_drained_end(BdrvChild *child)
{
BlockBackend *blk = child->opaque;
assert(blk->quiesce_counter);
assert(qatomic_read(&blk->quiesce_counter));
assert(blk->public.throttle_group_member.io_limits_disabled);
qatomic_dec(&blk->public.throttle_group_member.io_limits_disabled);
if (--blk->quiesce_counter == 0) {
if (qatomic_fetch_dec(&blk->quiesce_counter) == 1) {
if (blk->dev_ops && blk->dev_ops->drained_end) {
blk->dev_ops->drained_end(blk->dev_opaque);
}
while (qemu_co_enter_next(&blk->queued_requests, NULL)) {
qemu_mutex_lock(&blk->queued_requests_lock);
while (qemu_co_enter_next(&blk->queued_requests,
&blk->queued_requests_lock)) {
/* Resume all queued requests */
}
qemu_mutex_unlock(&blk->queued_requests_lock);
}
}

View File

@ -116,7 +116,6 @@ static int coroutine_fn commit_run(Job *job, Error **errp)
{
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
int64_t offset;
uint64_t delay_ns = 0;
int ret = 0;
int64_t n = 0; /* bytes */
QEMU_AUTO_VFREE void *buf = NULL;
@ -149,7 +148,7 @@ static int coroutine_fn commit_run(Job *job, Error **errp)
/* Note that even when no rate limit is applied we need to yield
* with no pending I/O here so that bdrv_drain_all() returns.
*/
job_sleep_ns(&s->common.job, delay_ns);
block_job_ratelimit_sleep(&s->common);
if (job_is_cancelled(&s->common.job)) {
break;
}
@ -184,9 +183,7 @@ static int coroutine_fn commit_run(Job *job, Error **errp)
job_progress_update(&s->common.job, n);
if (copy) {
delay_ns = block_job_ratelimit_get_delay(&s->common, n);
} else {
delay_ns = 0;
block_job_ratelimit_processed_bytes(&s->common, n);
}
}
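
The commit job now pairs block_job_ratelimit_processed_bytes() with block_job_ratelimit_sleep() instead of computing delay_ns by hand. The resulting loop shape, reduced to a sketch (the chunk worker is hypothetical):

#include "qemu/osdep.h"
#include "block/blockjob_int.h"

static int64_t example_process_chunk(BlockJob *bjob, int64_t offset); /* hypothetical */

static int coroutine_fn example_job_loop(BlockJob *bjob, int64_t len)
{
    int64_t offset, n;

    for (offset = 0; offset < len; offset += n) {
        /* sleeps only when a rate limit is configured on the job */
        block_job_ratelimit_sleep(bjob);
        if (job_is_cancelled(&bjob->job)) {
            break;
        }
        n = example_process_chunk(bjob, offset);
        block_job_ratelimit_processed_bytes(bjob, n);
    }
    return 0;
}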

View File

@ -412,6 +412,7 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
int64_t cluster_size;
g_autoptr(BlockdevOptions) full_opts = NULL;
BlockdevOptionsCbw *opts;
AioContext *ctx;
int ret;
full_opts = cbw_parse_options(options, errp);
@ -432,11 +433,15 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
return -EINVAL;
}
ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
if (opts->bitmap) {
bitmap = block_dirty_bitmap_lookup(opts->bitmap->node,
opts->bitmap->name, NULL, errp);
if (!bitmap) {
return -EINVAL;
ret = -EINVAL;
goto out;
}
}
s->on_cbw_error = opts->has_on_cbw_error ? opts->on_cbw_error :
@ -454,21 +459,24 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
s->bcs = block_copy_state_new(bs->file, s->target, bitmap, errp);
if (!s->bcs) {
error_prepend(errp, "Cannot create block-copy-state: ");
return -EINVAL;
ret = -EINVAL;
goto out;
}
cluster_size = block_copy_cluster_size(s->bcs);
s->done_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
if (!s->done_bitmap) {
return -EINVAL;
ret = -EINVAL;
goto out;
}
bdrv_disable_dirty_bitmap(s->done_bitmap);
/* s->access_bitmap starts equal to bcs bitmap */
s->access_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
if (!s->access_bitmap) {
return -EINVAL;
ret = -EINVAL;
goto out;
}
bdrv_disable_dirty_bitmap(s->access_bitmap);
bdrv_dirty_bitmap_merge_internal(s->access_bitmap,
@ -478,7 +486,10 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
qemu_co_mutex_init(&s->lock);
QLIST_INIT(&s->frozen_read_reqs);
return 0;
ret = 0;
out:
aio_context_release(ctx);
return ret;
}
static void cbw_close(BlockDriverState *bs)

View File

@ -259,7 +259,6 @@ static BlockDriver bdrv_copy_on_read = {
.bdrv_co_eject = cor_co_eject,
.bdrv_co_lock_medium = cor_co_lock_medium,
.has_variable_length = true,
.is_filter = true,
};

View File

@ -61,7 +61,7 @@ bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int coroutine_fn GRAPH_RDLOCK
bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int coroutine_fn
int coroutine_fn GRAPH_RDLOCK
nbd_co_do_establish_connection(BlockDriverState *bs, bool blocking,
Error **errp);
@ -85,7 +85,8 @@ bdrv_common_block_status_above(BlockDriverState *bs,
int64_t *map,
BlockDriverState **file,
int *depth);
int co_wrapper_mixed
int co_wrapper_mixed_bdrv_rdlock
nbd_do_establish_connection(BlockDriverState *bs, bool blocking, Error **errp);
#endif /* BLOCK_COROUTINES_H */

View File

@ -43,7 +43,6 @@ static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
int ret;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD();
job_progress_set_remaining(&s->common, 1);
ret = s->drv->bdrv_co_create(s->opts, errp);

View File

@ -99,11 +99,9 @@ struct BlockCryptoCreateData {
};
static int block_crypto_create_write_func(QCryptoBlock *block,
size_t offset,
const uint8_t *buf,
size_t buflen,
void *opaque,
static int coroutine_fn GRAPH_UNLOCKED
block_crypto_create_write_func(QCryptoBlock *block, size_t offset,
const uint8_t *buf, size_t buflen, void *opaque,
Error **errp)
{
struct BlockCryptoCreateData *data = opaque;
@ -117,10 +115,9 @@ static int block_crypto_create_write_func(QCryptoBlock *block,
return 0;
}
static int block_crypto_create_init_func(QCryptoBlock *block,
size_t headerlen,
void *opaque,
Error **errp)
static int coroutine_fn GRAPH_UNLOCKED
block_crypto_create_init_func(QCryptoBlock *block, size_t headerlen,
void *opaque, Error **errp)
{
struct BlockCryptoCreateData *data = opaque;
Error *local_error = NULL;
@ -314,7 +311,7 @@ static int block_crypto_open_generic(QCryptoBlockFormat format,
}
static int coroutine_fn
static int coroutine_fn GRAPH_UNLOCKED
block_crypto_co_create_generic(BlockDriverState *bs, int64_t size,
QCryptoBlockCreateOptions *opts,
PreallocMode prealloc, Error **errp)
@ -355,7 +352,7 @@ block_crypto_co_create_generic(BlockDriverState *bs, int64_t size,
ret = 0;
cleanup:
qcrypto_block_free(crypto);
blk_unref(blk);
blk_co_unref(blk);
return ret;
}
@ -627,7 +624,7 @@ static int block_crypto_open_luks(BlockDriverState *bs,
bs, options, flags, errp);
}
static int coroutine_fn
static int coroutine_fn GRAPH_UNLOCKED
block_crypto_co_create_luks(BlockdevCreateOptions *create_options, Error **errp)
{
BlockdevCreateOptionsLUKS *luks_opts;
@ -661,11 +658,11 @@ block_crypto_co_create_luks(BlockdevCreateOptions *create_options, Error **errp)
ret = 0;
fail:
bdrv_unref(bs);
bdrv_co_unref(bs);
return ret;
}
static int coroutine_fn GRAPH_RDLOCK
static int coroutine_fn GRAPH_UNLOCKED
block_crypto_co_create_opts_luks(BlockDriver *drv, const char *filename,
QemuOpts *opts, Error **errp)
{
@ -727,16 +724,18 @@ fail:
* beforehand, it has been truncated and corrupted in the process.
*/
if (ret) {
bdrv_graph_co_rdlock();
bdrv_co_delete_file_noerr(bs);
bdrv_graph_co_rdunlock();
}
bdrv_unref(bs);
bdrv_co_unref(bs);
qapi_free_QCryptoBlockCreateOptions(create_opts);
qobject_unref(cryptoopts);
return ret;
}
static int coroutine_fn
static int coroutine_fn GRAPH_RDLOCK
block_crypto_co_get_info_luks(BlockDriverState *bs, BlockDriverInfo *bdi)
{
BlockDriverInfo subbdi;

View File

@ -132,7 +132,7 @@ static gboolean curl_drop_socket(void *key, void *value, void *opaque)
CURLSocket *socket = value;
BDRVCURLState *s = socket->s;
aio_set_fd_handler(s->aio_context, socket->fd, false,
aio_set_fd_handler(s->aio_context, socket->fd,
NULL, NULL, NULL, NULL, NULL);
return true;
}
@ -180,20 +180,20 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
trace_curl_sock_cb(action, (int)fd);
switch (action) {
case CURL_POLL_IN:
aio_set_fd_handler(s->aio_context, fd, false,
aio_set_fd_handler(s->aio_context, fd,
curl_multi_do, NULL, NULL, NULL, socket);
break;
case CURL_POLL_OUT:
aio_set_fd_handler(s->aio_context, fd, false,
aio_set_fd_handler(s->aio_context, fd,
NULL, curl_multi_do, NULL, NULL, socket);
break;
case CURL_POLL_INOUT:
aio_set_fd_handler(s->aio_context, fd, false,
aio_set_fd_handler(s->aio_context, fd,
curl_multi_do, curl_multi_do,
NULL, NULL, socket);
break;
case CURL_POLL_REMOVE:
aio_set_fd_handler(s->aio_context, fd, false,
aio_set_fd_handler(s->aio_context, fd,
NULL, NULL, NULL, NULL, NULL);
break;
}

View File

@ -23,7 +23,12 @@
*/
#include "qemu/osdep.h"
#include "dmg.h"
/* Work around a -Wstrict-prototypes warning in LZFSE headers */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-prototypes"
#include <lzfse.h>
#pragma GCC diagnostic pop
static int dmg_uncompress_lzfse_do(char *next_in, unsigned int avail_in,
char *next_out, unsigned int avail_out)
