commit 8cdd7b951c
Author: Andrea Fioraldi
Date:   2023-09-05 10:50:58 +02:00

1155 changed files with 41996 additions and 14815 deletions


@ -1,10 +1,24 @@
variables:
# On stable branches this is changed by later rules. Should also
# be overridden per pipeline if running pipelines concurrently
# for different branches in contributor forks.
QEMU_CI_CONTAINER_TAG: latest
# For purposes of CI rules, upstream is the gitlab.com/qemu-project
# namespace. When testing CI, it might be useful to override this
# to point to a fork repo
QEMU_CI_UPSTREAM: qemu-project
# The order of rules defined here is critically important.
# They are evaluated in order and first match wins.
#
# Thus we group them into a number of stages, ordered from
# most restrictive to least restrictive
#
# For pipelines running for stable "staging-X.Y" branches
# we must override QEMU_CI_CONTAINER_TAG
#
.base_job_template:
variables:
# Each script line will be in a collapsible section in the job output
@ -19,28 +33,36 @@
# want jobs to run
#############################################################
# Never run jobs upstream on stable branch, staging branch jobs already ran
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /^stable-/'
when: never
# Never run jobs upstream on tags, staging branch jobs already ran
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_TAG'
when: never
# Cirrus jobs can't run unless the creds / target repo are set
- if: '$QEMU_JOB_CIRRUS && ($CIRRUS_GITHUB_REPO == null || $CIRRUS_API_TOKEN == null)'
when: never
# Publishing jobs should only run on the default branch in upstream
- if: '$QEMU_JOB_PUBLISH == "1" && $CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH'
- if: '$QEMU_JOB_PUBLISH == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH'
when: never
# Non-publishing jobs should only run on staging branches in upstream
- if: '$QEMU_JOB_PUBLISH != "1" && $CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH !~ /staging/'
- if: '$QEMU_JOB_PUBLISH != "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH !~ /staging/'
when: never
# Jobs only intended for forks should always be skipped on upstream
- if: '$QEMU_JOB_ONLY_FORKS == "1" && $CI_PROJECT_NAMESPACE == "qemu-project"'
- if: '$QEMU_JOB_ONLY_FORKS == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM'
when: never
# Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
- if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
- if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
when: never
# Avocado jobs don't run in forks unless $QEMU_CI_AVOCADO_TESTING is set
- if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != "qemu-project"'
- if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
when: never
@ -50,17 +72,29 @@
#############################################################
# Optional jobs should not be run unless manually triggered
- if: '$QEMU_JOB_OPTIONAL && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: manual
allow_failure: true
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
- if: '$QEMU_JOB_OPTIONAL'
when: manual
allow_failure: true
# Skipped jobs should not be run unless manually triggered
- if: '$QEMU_JOB_SKIPPED && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: manual
allow_failure: true
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
- if: '$QEMU_JOB_SKIPPED'
when: manual
allow_failure: true
# Avocado jobs can be manually started in forks if $QEMU_CI_AVOCADO_TESTING is unset
- if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != "qemu-project"'
- if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
when: manual
allow_failure: true
@ -72,8 +106,23 @@
# Fork pipeline jobs don't start automatically unless
# QEMU_CI=2 is set
- if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
- if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
when: manual
# Upstream pipeline jobs start automatically unless told not to
# by setting QEMU_CI=1
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: manual
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM'
when: manual
# Jobs can run if any jobs they depend on were successful
- if: '$QEMU_JOB_SKIPPED && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: on_success
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
- when: on_success


@ -1,12 +1,22 @@
.native_build_job_template:
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
cache:
paths:
- ccache
key: "$CI_JOB_NAME"
when: always
before_script:
- JOBS=$(expr $(nproc) + 1)
script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
- export CCACHE_MAXSIZE="500M"
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
- mkdir build
- cd build
- ccache --zero-stats
- ../configure --enable-werror --disable-docs --enable-fdt=system
${TARGETS:+--target-list="$TARGETS"}
$CONFIGURE_ARGS ||
@ -20,11 +30,13 @@
then
make -j"$JOBS" $MAKE_CHECK_ARGS ;
fi
- ccache --show-stats
# We jump through some hoops in common_test_job_template to avoid
# rebuilding all the object files we skip in the artifacts
.native_build_artifact_template:
artifacts:
when: on_success
expire_in: 2 days
paths:
- build
@ -40,7 +52,7 @@
.common_test_job_template:
extends: .base_job_template
stage: test
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
script:
- scripts/git-submodule.sh update roms/SLOF
- meson subprojects download $(cd build/subprojects && echo *)
@ -53,6 +65,7 @@
extends: .common_test_job_template
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: always
expire_in: 7 days
paths:
- build/meson-logs/testlog.txt
@ -68,7 +81,7 @@
policy: pull-push
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: on_failure
when: always
expire_in: 7 days
paths:
- build/tests/results/latest/results.xml


@ -103,7 +103,7 @@ crash-test-debian:
script:
- cd build
- make NINJA=":" check-venv
- tests/venv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386
- pyvenv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386
build-system-fedora:
extends:
@ -146,8 +146,8 @@ crash-test-fedora:
script:
- cd build
- make NINJA=":" check-venv
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
- pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
- pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
build-system-centos:
extends:
@ -454,7 +454,7 @@ gcov:
IMAGE: ubuntu2204
CONFIGURE_ARGS: --enable-gcov
TARGETS: aarch64-softmmu ppc64-softmmu s390x-softmmu x86_64-softmmu
MAKE_CHECK_ARGS: check
MAKE_CHECK_ARGS: check-unit check-softfloat
after_script:
- cd build
- gcovr --xml-pretty --exclude-unreachable-branches --print-summary
@ -462,8 +462,12 @@ gcov:
coverage: /^\s*lines:\s*\d+.\d+\%/
artifacts:
name: ${CI_JOB_NAME}-${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHA}
when: always
expire_in: 2 days
paths:
- build/meson-logs/testlog.txt
reports:
junit: build/meson-logs/testlog.junit.xml
coverage_report:
coverage_format: cobertura
path: build/coverage.xml
@ -532,7 +536,7 @@ build-without-defaults:
build-libvhost-user:
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/fedora:latest
image: $CI_REGISTRY_IMAGE/qemu/fedora:$QEMU_CI_CONTAINER_TAG
needs:
job: amd64-fedora-container
script:
@ -572,7 +576,7 @@ build-tools-and-docs-debian:
# of what topic branch they're currently using
pages:
extends: .base_job_template
image: $CI_REGISTRY_IMAGE/qemu/debian-amd64:latest
image: $CI_REGISTRY_IMAGE/qemu/debian-amd64:$QEMU_CI_CONTAINER_TAG
stage: test
needs:
- job: build-tools-and-docs-debian
@ -587,6 +591,7 @@ pages:
- make -C build install DESTDIR=$(pwd)/temp-install
- mv temp-install/usr/local/share/doc/qemu/* public/
artifacts:
when: on_success
paths:
- public
variables:


@ -50,7 +50,7 @@ x64-freebsd-13-build:
NAME: freebsd-13
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
CIRRUS_VM_IMAGE_SELECTOR: image_family
CIRRUS_VM_IMAGE_NAME: freebsd-13-1
CIRRUS_VM_IMAGE_NAME: freebsd-13-2
CIRRUS_VM_CPUS: 8
CIRRUS_VM_RAM: 8G
UPDATE_COMMAND: pkg update; pkg upgrade -y


@ -11,6 +11,6 @@ MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='pkg'
PIP3='/usr/local/bin/pip-3.8'
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 xorriso zstd'
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-tomli py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 xorriso zstd'
PYPI_PKGS=''
PYTHON='/usr/local/bin/python3'


@ -15,7 +15,7 @@ env:
folder: $HOME/.cache/qemu-vm
install_script:
- dnf update -y
- dnf install -y git make openssh-clients qemu-img qemu-system-x86 wget
- dnf install -y git make openssh-clients qemu-img qemu-system-x86 wget meson
clone_script:
- git clone --depth 100 "$CI_REPOSITORY_URL" .
- git fetch origin "$CI_COMMIT_REF_NAME"


@ -12,5 +12,5 @@ NINJA='/opt/homebrew/bin/ninja'
PACKAGING_COMMAND='brew'
PIP3='/opt/homebrew/bin/pip3'
PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol tesseract usbredir vde vte3 xorriso zlib zstd'
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme'
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme tomli'
PYTHON='/opt/homebrew/bin/python3'


@ -5,7 +5,8 @@
services:
- docker:dind
before_script:
- export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:latest"
- export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:$QEMU_CI_CONTAINER_TAG"
# Always ':latest' because we always use upstream as a common cache source
- export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/qemu/$NAME:latest"
- docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
- until docker info; do sleep 1; done


@ -1,11 +1,21 @@
.cross_system_build_job:
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
cache:
paths:
- ccache
key: "$CI_JOB_NAME"
when: always
timeout: 80m
script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
- export CCACHE_MAXSIZE="500M"
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
- mkdir build
- cd build
- ccache --zero-stats
- ../configure --enable-werror --disable-docs --enable-fdt=system
--disable-user $QEMU_CONFIGURE_OPTS $EXTRA_CONFIGURE_OPTS
--target-list-exclude="arm-softmmu cris-softmmu
@ -18,6 +28,7 @@
version="$(git describe --match v[0-9]* 2>/dev/null || git rev-parse --short HEAD)";
mv -v qemu-setup*.exe qemu-setup-${version}.exe;
fi
- ccache --show-stats
# Job to cross-build specific accelerators.
#
@ -27,9 +38,17 @@
.cross_accel_build_job:
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
timeout: 30m
cache:
paths:
- ccache/
key: "$CI_JOB_NAME"
script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
- export CCACHE_MAXSIZE="500M"
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
- mkdir build
- cd build
- ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
@ -39,8 +58,15 @@
.cross_user_build_job:
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
cache:
paths:
- ccache/
key: "$CI_JOB_NAME"
script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
- export CCACHE_MAXSIZE="500M"
- mkdir build
- cd build
- ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
@ -55,6 +81,7 @@
.cross_test_artifacts:
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: always
expire_in: 7 days
paths:
- build/meson-logs/testlog.txt


@ -57,7 +57,7 @@ cross-i386-tci:
variables:
IMAGE: fedora-i386-cross
ACCEL: tcg-interpreter
EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user
EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user --disable-plugins
MAKE_CHECK_ARGS: check check-tcg
cross-mipsel-system:
@ -169,6 +169,7 @@ cross-win32-system:
CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu m68k-softmmu
microblazeel-softmmu mips64el-softmmu nios2-softmmu
artifacts:
when: on_success
paths:
- build/qemu-setup*.exe
@ -184,6 +185,7 @@ cross-win64-system:
or1k-softmmu rx-softmmu sh4eb-softmmu sparc64-softmmu
tricore-softmmu xtensaeb-softmmu
artifacts:
when: on_success
paths:
- build/qemu-setup*.exe


@ -63,6 +63,7 @@ build-opensbi:
stage: build
needs: ['docker-opensbi']
artifacts:
when: on_success
paths: # 'artifacts.zip' will contain the following files:
- pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
- pc-bios/opensbi-riscv64-generic-fw_dynamic.bin


@ -26,7 +26,7 @@ check-dco:
check-python-minreqs:
extends: .base_job_template
stage: test
image: $CI_REGISTRY_IMAGE/qemu/python:latest
image: $CI_REGISTRY_IMAGE/qemu/python:$QEMU_CI_CONTAINER_TAG
script:
- make -C python check-minreqs
variables:
@ -37,7 +37,7 @@ check-python-minreqs:
check-python-tox:
extends: .base_job_template
stage: test
image: $CI_REGISTRY_IMAGE/qemu/python:latest
image: $CI_REGISTRY_IMAGE/qemu/python:$QEMU_CI_CONTAINER_TAG
script:
- make -C python check-tox
variables:


@ -5,21 +5,60 @@
- windows
- windows-1809
cache:
key: "${CI_JOB_NAME}-cache"
key: "$CI_JOB_NAME"
paths:
- ${CI_PROJECT_DIR}/msys64/var/cache
- msys64/var/cache
- ccache
when: always
needs: []
stage: build
timeout: 80m
timeout: 100m
variables:
# This feature doesn't (currently) work with PowerShell: it stops
# the echoing of commands being run and doesn't show any timing
FF_SCRIPT_SECTIONS: 0
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
expire_in: 7 days
paths:
- build/meson-logs/testlog.txt
reports:
junit: "build/meson-logs/testlog.junit.xml"
before_script:
- Write-Output "Acquiring msys2.exe installer at $(Get-Date -Format u)"
- If ( !(Test-Path -Path msys64\var\cache ) ) {
mkdir msys64\var\cache
}
- If ( !(Test-Path -Path msys64\var\cache\msys2.exe ) ) {
Invoke-WebRequest
"https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe"
-outfile "msys64\var\cache\msys2.exe"
- Invoke-WebRequest
"https://repo.msys2.org/distrib/msys2-x86_64-latest.sfx.exe.sig"
-outfile "msys2.exe.sig"
- if ( Test-Path -Path msys64\var\cache\msys2.exe.sig ) {
Write-Output "Cached installer sig" ;
if ( ((Get-FileHash msys2.exe.sig).Hash -ne (Get-FileHash msys64\var\cache\msys2.exe.sig).Hash) ) {
Write-Output "Mis-matched installer sig, new installer download required" ;
Remove-Item -Path msys64\var\cache\msys2.exe.sig ;
if ( Test-Path -Path msys64\var\cache\msys2.exe ) {
Remove-Item -Path msys64\var\cache\msys2.exe
}
} else {
Write-Output "Matched installer sig, cached installer still valid"
}
} else {
Write-Output "No cached installer sig, new installer download required" ;
if ( Test-Path -Path msys64\var\cache\msys2.exe ) {
Remove-Item -Path msys64\var\cache\msys2.exe
}
}
- if ( !(Test-Path -Path msys64\var\cache\msys2.exe ) ) {
Write-Output "Fetching latest installer" ;
Invoke-WebRequest
"https://repo.msys2.org/distrib/msys2-x86_64-latest.sfx.exe"
-outfile "msys64\var\cache\msys2.exe" ;
Copy-Item -Path msys2.exe.sig -Destination msys64\var\cache\msys2.exe.sig
} else {
Write-Output "Using cached installer"
}
- Write-Output "Invoking msys2.exe installer at $(Get-Date -Format u)"
- msys64\var\cache\msys2.exe -y
- ((Get-Content -path .\msys64\etc\\post-install\\07-pacman-key.post -Raw)
-replace '--refresh-keys', '--version') |
@ -28,97 +67,74 @@
- .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu' # Core update
- .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu' # Normal update
- taskkill /F /FI "MODULES eq msys-2.0.dll"
script:
- Write-Output "Installing mingw packages at $(Get-Date -Format u)"
- .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
bison diffutils flex
git grep make sed
$MINGW_TARGET-capstone
$MINGW_TARGET-ccache
$MINGW_TARGET-curl
$MINGW_TARGET-cyrus-sasl
$MINGW_TARGET-dtc
$MINGW_TARGET-gcc
$MINGW_TARGET-glib2
$MINGW_TARGET-gnutls
$MINGW_TARGET-gtk3
$MINGW_TARGET-libgcrypt
$MINGW_TARGET-libjpeg-turbo
$MINGW_TARGET-libnfs
$MINGW_TARGET-libpng
$MINGW_TARGET-libssh
$MINGW_TARGET-libtasn1
$MINGW_TARGET-libusb
$MINGW_TARGET-lzo2
$MINGW_TARGET-nettle
$MINGW_TARGET-ninja
$MINGW_TARGET-pixman
$MINGW_TARGET-pkgconf
$MINGW_TARGET-python
$MINGW_TARGET-SDL2
$MINGW_TARGET-SDL2_image
$MINGW_TARGET-snappy
$MINGW_TARGET-spice
$MINGW_TARGET-usbredir
$MINGW_TARGET-zstd "
- Write-Output "Running build at $(Get-Date -Format u)"
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
- $env:CCACHE_BASEDIR = "$env:CI_PROJECT_DIR"
- $env:CCACHE_DIR = "$env:CCACHE_BASEDIR/ccache"
- $env:CCACHE_MAXSIZE = "500M"
- $env:CCACHE_DEPEND = 1 # cache misses are too expensive with preprocessor mode
- $env:CC = "ccache gcc"
- mkdir build
- cd build
- ..\msys64\usr\bin\bash -lc "ccache --zero-stats"
- ..\msys64\usr\bin\bash -lc "../configure --enable-fdt=system $CONFIGURE_ARGS"
- ..\msys64\usr\bin\bash -lc "make"
- ..\msys64\usr\bin\bash -lc "make check MTESTARGS='$TEST_ARGS' || { cat meson-logs/testlog.txt; exit 1; } ;"
- ..\msys64\usr\bin\bash -lc "ccache --show-stats"
- Write-Output "Finished build at $(Get-Date -Format u)"
msys2-64bit:
extends: .shared_msys2_builder
script:
- .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
bison diffutils flex
git grep make sed
mingw-w64-x86_64-capstone
mingw-w64-x86_64-curl
mingw-w64-x86_64-cyrus-sasl
mingw-w64-x86_64-dtc
mingw-w64-x86_64-gcc
mingw-w64-x86_64-glib2
mingw-w64-x86_64-gnutls
mingw-w64-x86_64-gtk3
mingw-w64-x86_64-libgcrypt
mingw-w64-x86_64-libjpeg-turbo
mingw-w64-x86_64-libnfs
mingw-w64-x86_64-libpng
mingw-w64-x86_64-libssh
mingw-w64-x86_64-libtasn1
mingw-w64-x86_64-libusb
mingw-w64-x86_64-lzo2
mingw-w64-x86_64-nettle
mingw-w64-x86_64-ninja
mingw-w64-x86_64-pixman
mingw-w64-x86_64-pkgconf
mingw-w64-x86_64-python
mingw-w64-x86_64-SDL2
mingw-w64-x86_64-SDL2_image
mingw-w64-x86_64-snappy
mingw-w64-x86_64-spice
mingw-w64-x86_64-usbredir
mingw-w64-x86_64-zstd "
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
- $env:MSYSTEM = 'MINGW64' # Start a 64-bit MinGW environment
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
- mkdir output
- cd output
# Note: do not remove "--without-default-devices"!
variables:
MINGW_TARGET: mingw-w64-x86_64
MSYSTEM: MINGW64
# do not remove "--without-default-devices"!
# commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using
# --without-default-devices") changed the msys2 64-bit job to compile QEMU
# with the --without-default-devices switch, because the build could not
# complete within the project timeout.
- ..\msys64\usr\bin\bash -lc '../configure --target-list=x86_64-softmmu
--without-default-devices --enable-fdt=system'
- ..\msys64\usr\bin\bash -lc 'make'
CONFIGURE_ARGS: --target-list=x86_64-softmmu --without-default-devices -Ddebug=false -Doptimization=0
# qTests don't run successfully with "--without-default-devices",
# so let's exclude the qtests from CI for now.
- ..\msys64\usr\bin\bash -lc 'make check MTESTARGS=\"--no-suite qtest\" || { cat meson-logs/testlog.txt; exit 1; } ;'
TEST_ARGS: --no-suite qtest
msys2-32bit:
extends: .shared_msys2_builder
script:
- .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
bison diffutils flex
git grep make sed
mingw-w64-i686-capstone
mingw-w64-i686-curl
mingw-w64-i686-cyrus-sasl
mingw-w64-i686-dtc
mingw-w64-i686-gcc
mingw-w64-i686-glib2
mingw-w64-i686-gnutls
mingw-w64-i686-gtk3
mingw-w64-i686-libgcrypt
mingw-w64-i686-libjpeg-turbo
mingw-w64-i686-libnfs
mingw-w64-i686-libpng
mingw-w64-i686-libssh
mingw-w64-i686-libtasn1
mingw-w64-i686-libusb
mingw-w64-i686-lzo2
mingw-w64-i686-nettle
mingw-w64-i686-ninja
mingw-w64-i686-pixman
mingw-w64-i686-pkgconf
mingw-w64-i686-python
mingw-w64-i686-SDL2
mingw-w64-i686-SDL2_image
mingw-w64-i686-snappy
mingw-w64-i686-spice
mingw-w64-i686-usbredir
mingw-w64-i686-zstd "
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
- $env:MSYSTEM = 'MINGW32' # Start a 32-bit MinGW environment
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
- mkdir output
- cd output
- ..\msys64\usr\bin\bash -lc '../configure --target-list=ppc64-softmmu
--enable-fdt=system'
- ..\msys64\usr\bin\bash -lc 'make'
- ..\msys64\usr\bin\bash -lc 'make check MTESTARGS=\"--no-suite qtest\" ||
{ cat meson-logs/testlog.txt; exit 1; }'
variables:
MINGW_TARGET: mingw-w64-i686
MSYSTEM: MINGW32
CONFIGURE_ARGS: --target-list=ppc64-softmmu -Ddebug=false -Doptimization=0
TEST_ARGS: --no-suite qtest


@ -76,9 +76,10 @@ Paul Burton <paulburton@kernel.org> <pburton@wavecomp.com>
Philippe Mathieu-Daudé <philmd@linaro.org> <f4bug@amsat.org>
Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@redhat.com>
Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@fungible.com>
Roman Bolshakov <rbolshakov@ddn.com> <r.bolshakov@yadro.com>
Stefan Brankovic <stefan.brankovic@syrmia.com> <stefan.brankovic@rt-rk.com.com>
Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com>
Taylor Simpson <ltaylorsimpson@gmail.com> <tsimpson@quicinc.com>
Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com>
# Also list preferred name forms where people have changed their
# git author config, or had utf8/latin1 encoding issues.


@ -302,6 +302,7 @@ M: Daniel Henrique Barboza <danielhb413@gmail.com>
R: Cédric Le Goater <clg@kaod.org>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
R: Nicholas Piggin <npiggin@gmail.com>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: target/ppc/
@ -451,8 +452,6 @@ S: Supported
F: target/s390x/kvm/
F: target/s390x/machine.c
F: target/s390x/sigp.c
F: hw/s390x/pv.c
F: include/hw/s390x/pv.h
F: gdb-xml/s390*.xml
T: git https://github.com/borntraeger/qemu.git s390-next
L: qemu-s390x@nongnu.org
@ -497,14 +496,14 @@ F: target/arm/hvf/
X86 HVF CPUs
M: Cameron Esfahani <dirty@apple.com>
M: Roman Bolshakov <r.bolshakov@yadro.com>
M: Roman Bolshakov <rbolshakov@ddn.com>
W: https://wiki.qemu.org/Features/HVF
S: Maintained
F: target/i386/hvf/
HVF
M: Cameron Esfahani <dirty@apple.com>
M: Roman Bolshakov <r.bolshakov@yadro.com>
M: Roman Bolshakov <rbolshakov@ddn.com>
W: https://wiki.qemu.org/Features/HVF
S: Maintained
F: accel/hvf/
@ -1225,6 +1224,7 @@ q800
M: Laurent Vivier <laurent@vivier.eu>
S: Maintained
F: hw/m68k/q800.c
F: hw/m68k/q800-glue.c
F: hw/misc/mac_via.c
F: hw/nubus/*
F: hw/display/macfb.c
@ -1236,6 +1236,8 @@ F: include/hw/misc/mac_via.h
F: include/hw/nubus/*
F: include/hw/display/macfb.h
F: include/hw/block/swim.h
F: include/hw/m68k/q800.h
F: include/hw/m68k/q800-glue.h
virt
M: Laurent Vivier <laurent@vivier.eu>
@ -1448,6 +1450,8 @@ F: tests/avocado/ppc_pseries.py
PowerNV (Non-Virtualized)
M: Cédric Le Goater <clg@kaod.org>
R: Frédéric Barrat <fbarrat@linux.ibm.com>
R: Nicholas Piggin <npiggin@gmail.com>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: docs/system/ppc/powernv.rst
@ -2045,7 +2049,7 @@ F: hw/usb/dev-serial.c
VFIO
M: Alex Williamson <alex.williamson@redhat.com>
R: Cédric Le Goater <clg@redhat.com>
M: Cédric Le Goater <clg@redhat.com>
S: Supported
F: hw/vfio/*
F: include/hw/vfio/
@ -2114,17 +2118,24 @@ F: include/sysemu/balloon.h
virtio-9p
M: Greg Kurz <groug@kaod.org>
M: Christian Schoenebeck <qemu_oss@crudebyte.com>
S: Odd Fixes
S: Maintained
W: https://wiki.qemu.org/Documentation/9p
F: hw/9pfs/
X: hw/9pfs/xen-9p*
X: hw/9pfs/9p-proxy*
F: fsdev/
F: docs/tools/virtfs-proxy-helper.rst
X: fsdev/virtfs-proxy-helper.c
F: tests/qtest/virtio-9p-test.c
F: tests/qtest/libqos/virtio-9p*
T: git https://gitlab.com/gkurz/qemu.git 9p-next
T: git https://github.com/cschoenebeck/qemu.git 9p.next
virtio-9p-proxy
F: hw/9pfs/9p-proxy*
F: fsdev/virtfs-proxy-helper.c
F: docs/tools/virtfs-proxy-helper.rst
S: Obsolete
virtio-blk
M: Stefan Hajnoczi <stefanha@redhat.com>
L: qemu-block@nongnu.org
@ -2204,6 +2215,13 @@ F: hw/virtio/vhost-user-gpio*
F: include/hw/virtio/vhost-user-gpio.h
F: tests/qtest/libqos/virtio-gpio.*
vhost-user-scmi
R: mzamazal@redhat.com
S: Supported
F: hw/virtio/vhost-user-scmi*
F: include/hw/virtio/vhost-user-scmi.h
F: tests/qtest/libqos/virtio-scmi.*
virtio-crypto
M: Gonglei <arei.gonglei@huawei.com>
S: Supported
@ -2211,6 +2229,13 @@ F: hw/virtio/virtio-crypto.c
F: hw/virtio/virtio-crypto-pci.c
F: include/hw/virtio/virtio-crypto.h
virtio based memory device
M: David Hildenbrand <david@redhat.com>
S: Supported
F: hw/virtio/virtio-md-pci.c
F: include/hw/virtio/virtio-md-pci.h
F: stubs/virtio-md-pci.c
virtio-mem
M: David Hildenbrand <david@redhat.com>
S: Supported
@ -2442,6 +2467,7 @@ T: git https://github.com/philmd/qemu.git fw_cfg-next
XIVE
M: Cédric Le Goater <clg@kaod.org>
R: Frédéric Barrat <fbarrat@linux.ibm.com>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/*/*xive*
@ -3099,6 +3125,7 @@ R: Qiuhao Li <Qiuhao.Li@outlook.com>
S: Maintained
F: tests/qtest/fuzz/
F: tests/qtest/fuzz-*test.c
F: tests/docker/test-fuzz
F: scripts/oss-fuzz/
F: hw/mem/sparse-mem.c
F: docs/devel/fuzzing.rst
@ -3182,6 +3209,15 @@ F: qapi/migration.json
F: tests/migration/
F: util/userfaultfd.c
Migration dirty limit and dirty page rate
M: Hyman Huang <yong.huang@smartx.com>
S: Maintained
F: softmmu/dirtylimit.c
F: include/sysemu/dirtylimit.h
F: migration/dirtyrate.c
F: migration/dirtyrate.h
F: include/sysemu/dirtyrate.h
D-Bus
M: Marc-André Lureau <marcandre.lureau@redhat.com>
S: Maintained
@ -3195,6 +3231,7 @@ F: docs/interop/dbus*
F: docs/sphinx/dbus*
F: docs/sphinx/fakedbusdoc.py
F: tests/qtest/dbus*
F: scripts/xml-preprocess*
Seccomp
M: Daniel P. Berrange <berrange@redhat.com>
@ -3208,6 +3245,7 @@ M: Daniel P. Berrange <berrange@redhat.com>
S: Maintained
F: crypto/
F: include/crypto/
F: host/include/*/host/crypto/
F: qapi/crypto.json
F: tests/unit/test-crypto-*
F: tests/bench/benchmark-crypto-*


@ -28,7 +28,7 @@ quiet-command = $(quiet-@)$(call quiet-command-run,$1,$2,$3)
UNCHECKED_GOALS := TAGS gtags cscope ctags dist \
help check-help print-% \
docker docker-% vm-help vm-test vm-build-%
docker docker-% lcitool-refresh vm-help vm-test vm-build-%
all:
.PHONY: all clean distclean recurse-all dist msi FORCE
@ -83,16 +83,17 @@ config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh
@if test -f meson-private/coredata.dat; then \
./config.status --skip-meson; \
else \
./config.status && touch build.ninja.stamp; \
./config.status; \
fi
# 2. meson.stamp exists if meson has run at least once (so ninja reconfigure
# works), but otherwise never needs to be updated
meson-private/coredata.dat: meson.stamp
meson.stamp: config-host.mak
@touch meson.stamp
# 3. ensure generated build files are up-to-date
# 3. ensure meson-generated build files are up-to-date
ifneq ($(NINJA),)
Makefile.ninja: build.ninja
@ -106,11 +107,19 @@ Makefile.ninja: build.ninja
endif
ifneq ($(MESON),)
# A separate rule is needed for Makefile dependencies to avoid -n
# The path to meson always points to pyvenv/bin/meson, but the absolute
# paths could change. In that case, force a regeneration of build.ninja.
# Note that this invocation of $(NINJA), just like when Make rebuilds
# Makefiles, does not include -n.
build.ninja: build.ninja.stamp
$(build-files):
build.ninja.stamp: meson.stamp $(build-files)
$(MESON) setup --reconfigure $(SRC_PATH) && touch $@
@if test "$$(cat build.ninja.stamp)" = "$(MESON)" && test -n "$(NINJA)"; then \
$(NINJA) build.ninja; \
else \
echo "$(MESON) setup --reconfigure $(SRC_PATH)"; \
$(MESON) setup --reconfigure $(SRC_PATH); \
fi && echo "$(MESON)" > $@
Makefile.mtest: build.ninja scripts/mtest2make.py
$(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@


@ -1 +1 @@
8.0.50
8.1.50


@ -304,7 +304,7 @@ static void hvf_region_del(MemoryListener *listener,
static MemoryListener hvf_memory_listener = {
.name = "hvf",
.priority = 10,
.priority = MEMORY_LISTENER_PRIORITY_ACCEL,
.region_add = hvf_region_add,
.region_del = hvf_region_del,
.log_start = hvf_log_start,
@ -372,19 +372,19 @@ type_init(hvf_type_init);
static void hvf_vcpu_destroy(CPUState *cpu)
{
hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd);
hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd);
assert_hvf_ok(ret);
hvf_arch_vcpu_destroy(cpu);
g_free(cpu->hvf);
cpu->hvf = NULL;
g_free(cpu->accel);
cpu->accel = NULL;
}
static int hvf_init_vcpu(CPUState *cpu)
{
int r;
cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
cpu->accel = g_new0(AccelCPUState, 1);
/* init cpu signals */
struct sigaction sigact;
@ -393,18 +393,19 @@ static int hvf_init_vcpu(CPUState *cpu)
sigact.sa_handler = dummy_signal;
sigaction(SIG_IPI, &sigact, NULL);
pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask);
sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI);
pthread_sigmask(SIG_BLOCK, NULL, &cpu->accel->unblock_ipi_mask);
sigdelset(&cpu->accel->unblock_ipi_mask, SIG_IPI);
#ifdef __aarch64__
r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL);
r = hv_vcpu_create(&cpu->accel->fd,
(hv_vcpu_exit_t **)&cpu->accel->exit, NULL);
#else
r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
r = hv_vcpu_create((hv_vcpuid_t *)&cpu->accel->fd, HV_VCPU_DEFAULT);
#endif
cpu->vcpu_dirty = 1;
assert_hvf_ok(r);
cpu->hvf->guest_debug_enabled = false;
cpu->accel->guest_debug_enabled = false;
return hvf_arch_init_vcpu(cpu);
}
@ -473,7 +474,7 @@ static void hvf_start_vcpu_thread(CPUState *cpu)
cpu, QEMU_THREAD_JOINABLE);
}
static int hvf_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
static int hvf_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
struct hvf_sw_breakpoint *bp;
int err;
@ -511,7 +512,7 @@ static int hvf_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr le
return 0;
}
static int hvf_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
static int hvf_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
struct hvf_sw_breakpoint *bp;
int err;


@ -51,7 +51,7 @@ void assert_hvf_ok(hv_return_t ret)
abort();
}
struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, target_ulong pc)
struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc)
{
struct hvf_sw_breakpoint *bp;


@ -450,6 +450,8 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp)
"kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
kvm_arch_vcpu_id(cpu));
}
cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
err:
return ret;
}
@ -1103,6 +1105,7 @@ static MemoryListener kvm_coalesced_pio_listener = {
.name = "kvm-coalesced-pio",
.coalesced_io_add = kvm_coalesce_pio_add,
.coalesced_io_del = kvm_coalesce_pio_del,
.priority = MEMORY_LISTENER_PRIORITY_MIN,
};
int kvm_check_extension(KVMState *s, unsigned int extension)
@ -1451,15 +1454,13 @@ static void *kvm_dirty_ring_reaper_thread(void *data)
return NULL;
}
static int kvm_dirty_ring_reaper_init(KVMState *s)
static void kvm_dirty_ring_reaper_init(KVMState *s)
{
struct KVMDirtyRingReaper *r = &s->reaper;
qemu_thread_create(&r->reaper_thr, "kvm-reaper",
kvm_dirty_ring_reaper_thread,
s, QEMU_THREAD_JOINABLE);
return 0;
}
static int kvm_dirty_ring_init(KVMState *s)
@ -1775,7 +1776,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
kml->listener.commit = kvm_region_commit;
kml->listener.log_start = kvm_log_start;
kml->listener.log_stop = kvm_log_stop;
kml->listener.priority = 10;
kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL;
kml->listener.name = name;
if (s->kvm_dirty_ring_size) {
@ -1800,7 +1801,7 @@ static MemoryListener kvm_io_listener = {
.name = "kvm-io",
.eventfd_add = kvm_io_ioeventfd_add,
.eventfd_del = kvm_io_ioeventfd_del,
.priority = 10,
.priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
};
int kvm_set_irq(KVMState *s, int irq, int level)
@ -2455,7 +2456,7 @@ static int kvm_init(MachineState *ms)
KVMState *s;
const KVMCapabilityInfo *missing_cap;
int ret;
int type = 0;
int type;
uint64_t dirty_log_manual_caps;
qemu_mutex_init(&kml_slots_lock);
@ -2520,6 +2521,13 @@ static int kvm_init(MachineState *ms)
type = mc->kvm_type(ms, kvm_type);
} else if (mc->kvm_type) {
type = mc->kvm_type(ms, NULL);
} else {
type = kvm_arch_get_default_type(ms);
}
if (type < 0) {
ret = -EINVAL;
goto err;
}
do {
@ -2734,10 +2742,7 @@ static int kvm_init(MachineState *ms)
}
if (s->kvm_dirty_ring_size) {
ret = kvm_dirty_ring_reaper_init(s);
if (ret) {
goto err;
}
kvm_dirty_ring_reaper_init(s);
}
if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
@ -2755,6 +2760,7 @@ err:
if (s->fd != -1) {
close(s->fd);
}
g_free(s->as);
g_free(s->memory_listener.slots);
return ret;
@ -2809,7 +2815,7 @@ void kvm_flush_coalesced_mmio_buffer(void)
{
KVMState *s = kvm_state;
if (s->coalesced_flush_in_progress) {
if (!s || s->coalesced_flush_in_progress) {
return;
}
@ -3303,8 +3309,7 @@ bool kvm_arm_supports_user_irq(void)
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
target_ulong pc)
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc)
{
struct kvm_sw_breakpoint *bp;
@ -4007,7 +4012,7 @@ static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd
/* Read stats header */
kvm_stats_header = &descriptors->kvm_stats_header;
ret = read(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header));
ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0);
if (ret != sizeof(*kvm_stats_header)) {
error_setg(errp, "KVM stats: failed to read stats header: "
"expected %zu actual %zu",
@ -4038,7 +4043,8 @@ static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd
}
static void query_stats(StatsResultList **result, StatsTarget target,
strList *names, int stats_fd, Error **errp)
strList *names, int stats_fd, CPUState *cpu,
Error **errp)
{
struct kvm_stats_desc *kvm_stats_desc;
struct kvm_stats_header *kvm_stats_header;
@ -4096,7 +4102,7 @@ static void query_stats(StatsResultList **result, StatsTarget target,
break;
case STATS_TARGET_VCPU:
add_stats_entry(result, STATS_PROVIDER_KVM,
current_cpu->parent_obj.canonical_path,
cpu->parent_obj.canonical_path,
stats_list);
break;
default:
@ -4133,10 +4139,9 @@ static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
}
static void query_stats_vcpu(CPUState *cpu, run_on_cpu_data data)
static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
int stats_fd = cpu->kvm_vcpu_stats_fd;
Error *local_err = NULL;
if (stats_fd == -1) {
@ -4145,14 +4150,13 @@ static void query_stats_vcpu(CPUState *cpu, run_on_cpu_data data)
return;
}
query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
kvm_stats_args->names, stats_fd, kvm_stats_args->errp);
close(stats_fd);
kvm_stats_args->names, stats_fd, cpu,
kvm_stats_args->errp);
}
static void query_stats_schema_vcpu(CPUState *cpu, run_on_cpu_data data)
static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
int stats_fd = cpu->kvm_vcpu_stats_fd;
Error *local_err = NULL;
if (stats_fd == -1) {
@ -4162,7 +4166,6 @@ static void query_stats_schema_vcpu(CPUState *cpu, run_on_cpu_data data)
}
query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
kvm_stats_args->errp);
close(stats_fd);
}
static void query_stats_cb(StatsResultList **result, StatsTarget target,
@ -4180,7 +4183,7 @@ static void query_stats_cb(StatsResultList **result, StatsTarget target,
error_setg_errno(errp, errno, "KVM stats: ioctl failed");
return;
}
query_stats(result, target, names, stats_fd, errp);
query_stats(result, target, names, stats_fd, NULL, errp);
close(stats_fd);
break;
}
@ -4194,7 +4197,7 @@ static void query_stats_cb(StatsResultList **result, StatsTarget target,
if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
continue;
}
run_on_cpu(cpu, query_stats_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
query_stats_vcpu(cpu, &stats_args);
}
break;
}
@ -4220,6 +4223,6 @@ void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
if (first_cpu) {
stats_args.result.schema = result;
stats_args.errp = errp;
run_on_cpu(first_cpu, query_stats_schema_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
query_stats_schema_vcpu(first_cpu, &stats_args);
}
}


@ -27,6 +27,7 @@ bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_ioeventfd_any_length_allowed;
bool kvm_msi_use_devid;
bool kvm_direct_msi_allowed;
void kvm_flush_coalesced_mmio_buffer(void)
{


@ -18,7 +18,7 @@ void tb_flush(CPUState *cpu)
{
}
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
void tlb_set_dirty(CPUState *cpu, vaddr vaddr)
{
}
@ -26,14 +26,14 @@ void tcg_flush_jmp_cache(CPUState *cpu)
{
}
int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr)
{
g_assert_not_reached();
}
void *probe_access(CPUArchState *env, target_ulong addr, int size,
void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
/* Handled by hardware accelerator. */


@ -41,7 +41,7 @@ CMPXCHG_HELPER(cmpxchgq_be, uint64_t)
CMPXCHG_HELPER(cmpxchgq_le, uint64_t)
#endif
#ifdef CONFIG_CMPXCHG128
#if HAVE_CMPXCHG128
CMPXCHG_HELPER(cmpxchgo_be, Int128)
CMPXCHG_HELPER(cmpxchgo_le, Int128)
#endif


@ -69,7 +69,7 @@
# define END _le
#endif
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
@ -87,7 +87,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
}
#if DATA_SIZE < 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
@ -100,7 +100,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
}
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr, ret; \
@ -131,7 +131,7 @@ GEN_ATOMIC_HELPER(xor_fetch)
* of CF_PARALLEL's value, we'll trace just a read and a write.
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr, cmp, old, new, val = xval; \
@ -172,7 +172,7 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
# define END _be
#endif
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
@ -190,7 +190,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
}
#if DATA_SIZE < 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
@ -203,7 +203,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
}
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr, ret; \
@ -231,7 +231,7 @@ GEN_ATOMIC_HELPER(xor_fetch)
* of CF_PARALLEL's value, we'll trace just a read and a write.
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \
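
For reference, the compare-and-swap retry loop that GEN_ATOMIC_HELPER_FN expands to looks roughly like the sketch below, instantiated by hand for a 32-bit fetch-min (FN = MIN, RET = old); the helper name is illustrative, not the macro's actual expansion:

```c
#include <stdbool.h>
#include <stdint.h>

/* Emulate an atomic fetch-min on a host that only guarantees
 * compare-and-swap: retry until no other thread modified the
 * location between the load and the CAS. */
static uint32_t atomic_fetch_umin(uint32_t *haddr, uint32_t val)
{
    uint32_t old = __atomic_load_n(haddr, __ATOMIC_RELAXED);
    uint32_t new;

    do {
        new = old < val ? old : val;              /* FN = MIN */
    } while (!__atomic_compare_exchange_n(haddr, &old, new, false,
                                          __ATOMIC_SEQ_CST,
                                          __ATOMIC_RELAXED));
    return old;                                   /* RET = old */
}
```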


@ -33,36 +33,6 @@ void cpu_loop_exit_noexc(CPUState *cpu)
cpu_loop_exit(cpu);
}
#if defined(CONFIG_SOFTMMU)
void cpu_reloading_memory_map(void)
{
if (qemu_in_vcpu_thread() && current_cpu->running) {
/* The guest can in theory prolong the RCU critical section as long
* as it feels like. The major problem with this is that because it
* can do multiple reconfigurations of the memory map within the
* critical section, we could potentially accumulate an unbounded
* collection of memory data structures awaiting reclamation.
*
* Because the only thing we're currently protecting with RCU is the
* memory data structures, it's sufficient to break the critical section
* in this callback, which we know will get called every time the
* memory map is rearranged.
*
* (If we add anything else in the system that uses RCU to protect
* its data structures, we will need to implement some other mechanism
* to force TCG CPUs to exit the critical section, at which point this
* part of this callback might become unnecessary.)
*
* This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
* only protects cpu->as->dispatch. Since we know our caller is about
* to reload it, it's safe to split the critical section.
*/
rcu_read_unlock();
rcu_read_lock();
}
}
#endif
void cpu_loop_exit(CPUState *cpu)
{
/* Undo the setting in cpu_tb_exec. */


@ -169,8 +169,8 @@ uint32_t curr_cflags(CPUState *cpu)
}
struct tb_desc {
target_ulong pc;
target_ulong cs_base;
vaddr pc;
uint64_t cs_base;
CPUArchState *env;
tb_page_addr_t page_addr0;
uint32_t flags;
@ -193,7 +193,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
return true;
} else {
tb_page_addr_t phys_page1;
target_ulong virt_page1;
vaddr virt_page1;
/*
* We know that the first page matched, and an otherwise valid TB
@ -214,8 +214,8 @@ static bool tb_lookup_cmp(const void *p, const void *d)
return false;
}
static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
uint64_t cs_base, uint32_t flags,
uint32_t cflags)
{
tb_page_addr_t phys_pc;
@ -238,9 +238,9 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
}
/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base,
uint32_t flags, uint32_t cflags)
static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
uint64_t cs_base, uint32_t flags,
uint32_t cflags)
{
TranslationBlock *tb;
CPUJumpCache *jc;
@ -292,13 +292,13 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
return tb;
}
static void log_cpu_exec(target_ulong pc, CPUState *cpu,
static void log_cpu_exec(vaddr pc, CPUState *cpu,
const TranslationBlock *tb)
{
if (qemu_log_in_addr_range(pc)) {
qemu_log_mask(CPU_LOG_EXEC,
"Trace %d: %p [%08" PRIx64
"/" TARGET_FMT_lx "/%08x/%08x] %s\n",
"/%016" VADDR_PRIx "/%08x/%08x] %s\n",
cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
tb->flags, tb->cflags, lookup_symbol(pc));
@ -323,7 +323,7 @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu,
}
}
static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
uint32_t *cflags)
{
CPUBreakpoint *bp;
@ -389,7 +389,7 @@ static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
return false;
}
static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
uint32_t *cflags)
{
return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
@ -408,7 +408,8 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
CPUState *cpu = env_cpu(env);
TranslationBlock *tb;
target_ulong cs_base, pc;
vaddr pc;
uint64_t cs_base;
uint32_t flags, cflags;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
@ -484,10 +485,10 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
cc->set_pc(cpu, last_tb->pc);
}
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
target_ulong pc = log_pc(cpu, last_tb);
vaddr pc = log_pc(cpu, last_tb);
if (qemu_log_in_addr_range(pc)) {
qemu_log("Stopped execution of TB chain before %p ["
TARGET_FMT_lx "] %s\n",
qemu_log("Stopped execution of TB chain before %p [%016"
VADDR_PRIx "] %s\n",
last_tb->tc.ptr, pc, lookup_symbol(pc));
}
}
@ -525,11 +526,49 @@ static void cpu_exec_exit(CPUState *cpu)
}
}
static void cpu_exec_longjmp_cleanup(CPUState *cpu)
{
/* Non-buggy compilers preserve this; assert the correct value. */
g_assert(cpu == current_cpu);
#ifdef CONFIG_USER_ONLY
clear_helper_retaddr();
if (have_mmap_lock()) {
mmap_unlock();
}
#else
/*
* For softmmu, a tlb_fill fault during translation will land here,
* and we need to release any page locks held. In system mode we
* have one tcg_ctx per thread, so we know it was this cpu doing
* the translation.
*
* Alternative 1: Install a cleanup to be called via an exception
* handling safe longjmp. It seems plausible that all our hosts
* support such a thing. We'd have to properly register unwind info
* for the JIT for EH, rather than just for GDB.
*
* Alternative 2: Set and restore cpu->jmp_env in tb_gen_code to
* capture the cpu_loop_exit longjmp, perform the cleanup, and
* jump again to arrive here.
*/
if (tcg_ctx->gen_tb) {
tb_unlock_pages(tcg_ctx->gen_tb);
tcg_ctx->gen_tb = NULL;
}
#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
assert_no_pages_locked();
}
void cpu_exec_step_atomic(CPUState *cpu)
{
CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
vaddr pc;
uint64_t cs_base;
uint32_t flags, cflags;
int tb_exit;
@ -566,16 +605,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
cpu_tb_exec(cpu, tb, &tb_exit);
cpu_exec_exit(cpu);
} else {
#ifdef CONFIG_USER_ONLY
clear_helper_retaddr();
if (have_mmap_lock()) {
mmap_unlock();
}
#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
assert_no_pages_locked();
cpu_exec_longjmp_cleanup(cpu);
}
/*
@ -902,8 +932,8 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
// LibAFL: Add last_tb_pc arg
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
target_ulong pc,
TranslationBlock **last_tb, int *tb_exit,
vaddr pc, TranslationBlock **last_tb,
int *tb_exit,
target_ulong *last_tb_pc)
{
int32_t insns_left;
@ -983,7 +1013,8 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
while (!cpu_handle_interrupt(cpu, &last_tb)) {
TranslationBlock *tb;
target_ulong cs_base, pc;
vaddr pc;
uint64_t cs_base;
uint32_t flags, cflags;
cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
@ -1080,20 +1111,7 @@ static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
{
/* Prepare setjmp context for exception handling. */
if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
/* Non-buggy compilers preserve this; assert the correct value. */
g_assert(cpu == current_cpu);
#ifdef CONFIG_USER_ONLY
clear_helper_retaddr();
if (have_mmap_lock()) {
mmap_unlock();
}
#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
assert_no_pages_locked();
cpu_exec_longjmp_cleanup(cpu);
}
return cpu_exec_loop(cpu, sc);

File diff suppressed because it is too large.


@ -10,6 +10,7 @@
#define ACCEL_TCG_INTERNAL_H
#include "exec/exec-all.h"
#include "exec/translate-all.h"
/*
* Access to the various translation structures needs to be serialised
@ -35,6 +36,32 @@ static inline void page_table_config_init(void) { }
void page_table_config_init(void);
#endif
#ifdef CONFIG_USER_ONLY
/*
* For user-only, page_protect sets the page read-only.
* Since most execution is already on read-only pages, and we'd need to
* account for other TBs on the same page, defer undoing any page protection
* until we receive the write fault.
*/
static inline void tb_lock_page0(tb_page_addr_t p0)
{
page_protect(p0);
}
static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
{
page_protect(p1);
}
static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
static inline void tb_unlock_pages(TranslationBlock *tb) { }
#else
void tb_lock_page0(tb_page_addr_t);
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_pages(TranslationBlock *);
#endif
#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
unsigned size,
@ -42,20 +69,19 @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */
TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
uint64_t cs_base, uint32_t flags,
int cflags);
void page_init(void);
void tb_htable_init(void);
void tb_reset_jump(TranslationBlock *tb, int n);
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
tb_page_addr_t phys_page2);
TranslationBlock *tb_link_page(TranslationBlock *tb);
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t host_pc);
/* Return the current PC from CPU, which may be cached in TB. */
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
{
if (tb_cflags(tb) & CF_PCREL) {
return cpu->cc->get_pc(cpu);
@ -78,4 +104,38 @@ extern int64_t max_advance;
extern bool one_insn_per_tb;
/**
* tcg_req_mo:
* @type: TCGBar
*
* Filter @type to the barrier that is required for the guest
* memory ordering vs the host memory ordering. A non-zero
* result indicates that some barrier is required.
*
* If TCG_GUEST_DEFAULT_MO is not defined, assume that the
* guest requires strict ordering.
*
* This is a macro so that it's constant even without optimization.
*/
#ifdef TCG_GUEST_DEFAULT_MO
# define tcg_req_mo(type) \
((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
#else
# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
#endif
/**
* cpu_req_mo:
* @type: TCGBar
*
* If tcg_req_mo indicates a barrier for @type is required
* for the guest memory model, issue a host memory barrier.
*/
#define cpu_req_mo(type) \
do { \
if (tcg_req_mo(type)) { \
smp_mb(); \
} \
} while (0)
#endif /* ACCEL_TCG_INTERNAL_H */
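
A standalone sketch of how this filter behaves, using stand-in values for the TCG_MO_* bits (the real definitions live in tcg/tcg.h) and assuming an x86 guest on an AArch64 host as the example combination:

```c
#include <stdio.h>

/* Stand-in barrier bits; the real ones are in tcg/tcg.h. */
#define TCG_MO_LD_LD  0x01
#define TCG_MO_ST_LD  0x02
#define TCG_MO_LD_ST  0x04
#define TCG_MO_ST_ST  0x08
#define TCG_MO_ALL    0x0f

/* x86 guest: strongly ordered except stores-then-loads.
 * AArch64 host: weakly ordered, nothing implicit. */
#define TCG_GUEST_DEFAULT_MO  (TCG_MO_ALL & ~TCG_MO_ST_LD)
#define TCG_TARGET_DEFAULT_MO 0

#define tcg_req_mo(type) \
    ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)

int main(void)
{
    /* Ordering required before a guest load. */
    int type = TCG_MO_LD_LD | TCG_MO_ST_LD;

    /* Prints 0x1: only LD_LD survives, since the guest never
     * promised ST_LD and the host provides nothing for free. */
    printf("host barrier required: 0x%x\n", tcg_req_mo(type));
    return 0;
}
```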


@ -159,10 +159,12 @@ static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
* another process, because the fallback start_exclusive solution
* provides no protection across processes.
*/
WITH_MMAP_LOCK_GUARD() {
if (!page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
uint64_t *p = __builtin_assume_aligned(pv, 8);
return *p;
}
}
#endif
/* Ultimate fallback: re-execute in serial context. */
@ -186,26 +188,28 @@ static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
return atomic16_read_ro(p);
}
#ifdef CONFIG_USER_ONLY
/*
* We can only use cmpxchg to emulate a load if the page is writable.
* If the page is not writable, then assume the value is immutable
* and requires no locking. This ignores the case of MAP_SHARED with
* another process, because the fallback start_exclusive solution
* provides no protection across processes.
*
* In system mode all guest pages are writable. For user mode,
* we must take mmap_lock so that the query remains valid until
* the write is complete -- tests/tcg/multiarch/munmap-pthread.c
* is an example that can race.
*/
WITH_MMAP_LOCK_GUARD() {
#ifdef CONFIG_USER_ONLY
if (!page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {
return *p;
}
#endif
/*
* In system mode all guest pages are writable, and for user-only
* we have just checked writability. Try cmpxchg.
*/
if (HAVE_ATOMIC128_RW) {
return atomic16_read_rw(p);
}
}
/* Ultimate fallback: re-execute in serial context. */
cpu_loop_exit_atomic(env_cpu(env), ra);
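
The "cmpxchg to emulate a load" trick in the comment works because a compare-and-swap of X against X either fails and reports the current contents, or succeeds and stores back the very same value. A sketch with GCC/Clang builtins, assuming a host with a 16-byte CAS and, per the comment above, a writable page:

```c
#include <stdbool.h>

/* Atomically snapshot 16 bytes on a host whose only 16-byte atomic
 * primitive is compare-and-swap.  CAS(p, 0, 0) either succeeds (the
 * location held 0, and storing 0 back is a no-op) or fails and
 * deposits the current contents into 'expected'. */
static __int128 atomic16_read_via_cas(__int128 *p)
{
    __int128 expected = 0;

    __atomic_compare_exchange_n(p, &expected, 0, false,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return expected;
}
```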
@ -400,8 +404,11 @@ static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
return load_atomic2(pv);
}
if (HAVE_ATOMIC128_RO) {
intptr_t left_in_page = -(pi | TARGET_PAGE_MASK);
if (likely(left_in_page > 8)) {
return load_atom_extract_al16_or_al8(pv, 2);
}
}
atmax = required_atomicity(env, pi, memop);
switch (atmax) {
@ -439,8 +446,11 @@ static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
return load_atomic4(pv);
}
if (HAVE_ATOMIC128_RO) {
intptr_t left_in_page = -(pi | TARGET_PAGE_MASK);
if (likely(left_in_page > 8)) {
return load_atom_extract_al16_or_al8(pv, 4);
}
}
atmax = required_atomicity(env, pi, memop);
switch (atmax) {
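
The new left_in_page guard relies on the identity -(pi | TARGET_PAGE_MASK) == bytes remaining in the page: OR-ing in the page mask sets every bit above the offset, and negating that two's-complement value yields the distance to the next boundary. A small worked example, assuming 4 KiB pages:

```c
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12                       /* assume 4 KiB pages */
#define TARGET_PAGE_MASK ((uintptr_t)-1 << TARGET_PAGE_BITS)

int main(void)
{
    uintptr_t pi = 0x7f00fffa;    /* 6 bytes short of a page boundary */
    intptr_t left_in_page = -(intptr_t)(pi | TARGET_PAGE_MASK);

    /* pi | MASK == 0x...fffa == -6 as a signed value, so negating it
     * gives exactly the bytes left before the boundary. */
    printf("left_in_page = %ld\n", (long)left_in_page);  /* prints 6 */
    return 0;
}
```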


@ -81,37 +81,6 @@ HumanReadableText *qmp_x_query_opcount(Error **errp)
return human_readable_text_from_str(buf);
}
#ifdef CONFIG_PROFILER
int64_t dev_time;
HumanReadableText *qmp_x_query_profile(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
static int64_t last_cpu_exec_time;
int64_t cpu_exec_time;
int64_t delta;
cpu_exec_time = tcg_cpu_exec_time();
delta = cpu_exec_time - last_cpu_exec_time;
g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n",
dev_time, dev_time / (double)NANOSECONDS_PER_SECOND);
g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n",
delta, delta / (double)NANOSECONDS_PER_SECOND);
last_cpu_exec_time = cpu_exec_time;
dev_time = 0;
return human_readable_text_from_str(buf);
}
#else
HumanReadableText *qmp_x_query_profile(Error **errp)
{
error_setg(errp, "Internal profiler not compiled");
return NULL;
}
#endif
static void hmp_tcg_register(void)
{
monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);


@ -35,16 +35,16 @@
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
static inline unsigned int tb_jmp_cache_hash_page(vaddr pc)
{
target_ulong tmp;
vaddr tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
{
target_ulong tmp;
vaddr tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
| (tmp & TB_JMP_ADDR_MASK));
@ -53,7 +53,7 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
#else
/* In user-mode we can get better hashing because we do not have a TLB */
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
{
return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
}
@ -61,7 +61,7 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
#endif /* CONFIG_SOFTMMU */
static inline
uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc,
uint32_t tb_hash_func(tb_page_addr_t phys_pc, vaddr pc,
uint32_t flags, uint64_t flags2, uint32_t cf_mask)
{
return qemu_xxhash8(phys_pc, pc, flags2, flags, cf_mask);
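tb_hash_func keys the global qht of translation blocks, so the insert and lookup sides must hash the identical tuple. The insert-side computation, as it appears in tb_link_page later in this patch:

h = tb_hash_func(tb_page_addr0(tb),
                 (tb->cflags & CF_PCREL ? 0 : tb->pc),
                 tb->flags, tb->cs_base, tb->cflags);
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);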

View File

@ -21,7 +21,7 @@ struct CPUJumpCache {
struct rcu_head rcu;
struct {
TranslationBlock *tb;
target_ulong pc;
vaddr pc;
} array[TB_JMP_CACHE_SIZE];
};

View File

@ -70,17 +70,7 @@ typedef struct PageDesc PageDesc;
*/
#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
static inline void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
PageDesc **ret_p2, tb_page_addr_t phys2,
bool alloc)
{
*ret_p1 = NULL;
*ret_p2 = NULL;
}
static inline void page_unlock(PageDesc *pd) { }
static inline void page_lock_tb(const TranslationBlock *tb) { }
static inline void page_unlock_tb(const TranslationBlock *tb) { }
static inline void tb_lock_pages(const TranslationBlock *tb) { }
/*
* For user-only, since we are protecting all of memory with a single lock,
@ -96,9 +86,9 @@ static void tb_remove_all(void)
}
/* Call with mmap_lock held. */
static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
static void tb_record(TranslationBlock *tb)
{
target_ulong addr;
vaddr addr;
int flags;
assert_memory_lock();
@ -391,12 +381,108 @@ static void page_lock(PageDesc *pd)
qemu_spin_lock(&pd->lock);
}
/* Like qemu_spin_trylock, returns false on success */
static bool page_trylock(PageDesc *pd)
{
bool busy = qemu_spin_trylock(&pd->lock);
if (!busy) {
page_lock__debug(pd);
}
return busy;
}
static void page_unlock(PageDesc *pd)
{
qemu_spin_unlock(&pd->lock);
page_unlock__debug(pd);
}
void tb_lock_page0(tb_page_addr_t paddr)
{
page_lock(page_find_alloc(paddr >> TARGET_PAGE_BITS, true));
}
void tb_lock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1)
{
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
PageDesc *pd0, *pd1;
if (pindex0 == pindex1) {
/* Identical pages, and the first page is already locked. */
return;
}
pd1 = page_find_alloc(pindex1, true);
if (pindex0 < pindex1) {
/* Correct locking order, we may block. */
page_lock(pd1);
return;
}
/* Incorrect locking order, we cannot block lest we deadlock. */
if (!page_trylock(pd1)) {
return;
}
/*
* Drop the lock on page0 and get both page locks in the right order.
* Restart translation via longjmp.
*/
pd0 = page_find_alloc(pindex0, false);
page_unlock(pd0);
page_lock(pd1);
page_lock(pd0);
siglongjmp(tcg_ctx->jmp_trans, -3);
}
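The rule implemented above: acquire page locks in ascending index order; when forced to acquire out of order, only trylock, and on contention back out and restart. The same pattern in isolation, using pthread mutexes instead of QEMU's page locks (a sketch under that substitution):

#include <pthread.h>

/*
 * 'held' is already locked; acquire 'other'.  Returns 1 when 'held'
 * had to be dropped, meaning the caller's in-progress work is
 * compromised and must restart -- the analogue of the siglongjmp(-3)
 * above.
 */
static int lock_second(pthread_mutex_t *held, pthread_mutex_t *other,
                       int other_sorts_first)
{
    if (!other_sorts_first) {
        pthread_mutex_lock(other);     /* correct order: blocking is safe */
        return 0;
    }
    if (pthread_mutex_trylock(other) == 0) {
        return 0;                      /* acquired without blocking */
    }
    /* Inverted order and contended: drop 'held', relock both in order. */
    pthread_mutex_unlock(held);
    pthread_mutex_lock(other);
    pthread_mutex_lock(held);
    return 1;
}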
void tb_unlock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1)
{
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
if (pindex0 != pindex1) {
page_unlock(page_find_alloc(pindex1, false));
}
}
static void tb_lock_pages(TranslationBlock *tb)
{
tb_page_addr_t paddr0 = tb_page_addr0(tb);
tb_page_addr_t paddr1 = tb_page_addr1(tb);
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
if (unlikely(paddr0 == -1)) {
return;
}
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
if (pindex0 < pindex1) {
page_lock(page_find_alloc(pindex0, true));
page_lock(page_find_alloc(pindex1, true));
return;
}
page_lock(page_find_alloc(pindex1, true));
}
page_lock(page_find_alloc(pindex0, true));
}
void tb_unlock_pages(TranslationBlock *tb)
{
tb_page_addr_t paddr0 = tb_page_addr0(tb);
tb_page_addr_t paddr1 = tb_page_addr1(tb);
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
if (unlikely(paddr0 == -1)) {
return;
}
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
page_unlock(page_find_alloc(pindex1, false));
}
page_unlock(page_find_alloc(pindex0, false));
}
static inline struct page_entry *
page_entry_new(PageDesc *pd, tb_page_addr_t index)
{
@ -420,13 +506,10 @@ static void page_entry_destroy(gpointer p)
/* returns false on success */
static bool page_entry_trylock(struct page_entry *pe)
{
bool busy;
busy = qemu_spin_trylock(&pe->pd->lock);
bool busy = page_trylock(pe->pd);
if (!busy) {
g_assert(!pe->locked);
pe->locked = true;
page_lock__debug(pe->pd);
}
return busy;
}
@ -604,8 +687,7 @@ static void tb_remove_all(void)
* Add the tb in the target page and protect it if necessary.
* Called with @p->lock held.
*/
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
unsigned int n)
static void tb_page_add(PageDesc *p, TranslationBlock *tb, unsigned int n)
{
bool page_already_protected;
@ -625,15 +707,21 @@ static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
}
}
static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
static void tb_record(TranslationBlock *tb)
{
tb_page_add(p1, tb, 0);
if (unlikely(p2)) {
tb_page_add(p2, tb, 1);
tb_page_addr_t paddr0 = tb_page_addr0(tb);
tb_page_addr_t paddr1 = tb_page_addr1(tb);
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
assert(paddr0 != -1);
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
tb_page_add(page_find_alloc(pindex1, false), tb, 1);
}
tb_page_add(page_find_alloc(pindex0, false), tb, 0);
}
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
static void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
TranslationBlock *tb1;
uintptr_t *pprev;
@ -653,74 +741,16 @@ static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
static void tb_remove(TranslationBlock *tb)
{
PageDesc *pd;
tb_page_addr_t paddr0 = tb_page_addr0(tb);
tb_page_addr_t paddr1 = tb_page_addr1(tb);
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
pd = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
tb_page_remove(pd, tb);
if (unlikely(tb->page_addr[1] != -1)) {
pd = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
tb_page_remove(pd, tb);
}
}
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
{
PageDesc *p1, *p2;
tb_page_addr_t page1;
tb_page_addr_t page2;
assert_memory_lock();
g_assert(phys1 != -1);
page1 = phys1 >> TARGET_PAGE_BITS;
page2 = phys2 >> TARGET_PAGE_BITS;
p1 = page_find_alloc(page1, alloc);
if (ret_p1) {
*ret_p1 = p1;
}
if (likely(phys2 == -1)) {
page_lock(p1);
return;
} else if (page1 == page2) {
page_lock(p1);
if (ret_p2) {
*ret_p2 = p1;
}
return;
}
p2 = page_find_alloc(page2, alloc);
if (ret_p2) {
*ret_p2 = p2;
}
if (page1 < page2) {
page_lock(p1);
page_lock(p2);
} else {
page_lock(p2);
page_lock(p1);
}
}
/* lock the page(s) of a TB in the correct acquisition order */
static void page_lock_tb(const TranslationBlock *tb)
{
page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false);
}
static void page_unlock_tb(const TranslationBlock *tb)
{
PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS);
page_unlock(p1);
if (unlikely(tb_page_addr1(tb) != -1)) {
PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS);
if (p2 != p1) {
page_unlock(p2);
}
assert(paddr0 != -1);
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
tb_page_remove(page_find_alloc(pindex1, false), tb);
}
tb_page_remove(page_find_alloc(pindex0, false), tb);
}
#endif /* CONFIG_USER_ONLY */
@ -925,18 +955,16 @@ static void tb_phys_invalidate__locked(TranslationBlock *tb)
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
if (page_addr == -1 && tb_page_addr0(tb) != -1) {
page_lock_tb(tb);
tb_lock_pages(tb);
do_tb_phys_invalidate(tb, true);
page_unlock_tb(tb);
tb_unlock_pages(tb);
} else {
do_tb_phys_invalidate(tb, false);
}
}
/*
* Add a new TB and link it to the physical page tables. phys_page2 is
* (-1) to indicate that only one page contains the TB.
*
* Add a new TB and link it to the physical page tables.
* Called with mmap_lock held for user-mode emulation.
*
* Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
@ -944,43 +972,29 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
* for the same block of guest code that @tb corresponds to. In that case,
* the caller should discard the original @tb, and use instead the returned TB.
*/
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
tb_page_addr_t phys_page2)
TranslationBlock *tb_link_page(TranslationBlock *tb)
{
PageDesc *p;
PageDesc *p2 = NULL;
void *existing_tb = NULL;
uint32_t h;
assert_memory_lock();
tcg_debug_assert(!(tb->cflags & CF_INVALID));
/*
* Add the TB to the page list, acquiring first the pages's locks.
* We keep the locks held until after inserting the TB in the hash table,
* so that if the insertion fails we know for sure that the TBs are still
* in the page descriptors.
* Note that inserting into the hash table first isn't an option, since
* we can only insert TBs that are fully initialized.
*/
page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
tb_record(tb, p, p2);
tb_record(tb);
/* add in the hash table */
h = tb_hash_func(phys_pc, (tb->cflags & CF_PCREL ? 0 : tb->pc),
h = tb_hash_func(tb_page_addr0(tb), (tb->cflags & CF_PCREL ? 0 : tb->pc),
tb->flags, tb->cs_base, tb->cflags);
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
/* remove TB from the page(s) if we couldn't insert it */
if (unlikely(existing_tb)) {
tb_remove(tb);
tb = existing_tb;
tb_unlock_pages(tb);
return existing_tb;
}
if (p2 && p2 != p) {
page_unlock(p2);
}
page_unlock(p);
tb_unlock_pages(tb);
return tb;
}
@ -1092,6 +1106,9 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
#endif /* TARGET_HAS_PRECISE_SMC */
/* Range may not cross a page. */
tcg_debug_assert(((start ^ last) & TARGET_PAGE_MASK) == 0);
/*
* We remove all the TBs in the range [start, last].
* XXX: see if in some cases it could be faster to invalidate all the code
@ -1182,15 +1199,17 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
index_last = last >> TARGET_PAGE_BITS;
for (index = start >> TARGET_PAGE_BITS; index <= index_last; index++) {
PageDesc *pd = page_find(index);
tb_page_addr_t bound;
tb_page_addr_t page_start, page_last;
if (pd == NULL) {
continue;
}
assert_page_locked(pd);
bound = (index << TARGET_PAGE_BITS) | ~TARGET_PAGE_MASK;
bound = MIN(bound, last);
tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
page_start = index << TARGET_PAGE_BITS;
page_last = page_start | ~TARGET_PAGE_MASK;
page_last = MIN(page_last, last);
tb_invalidate_phys_page_range__locked(pages, pd,
page_start, page_last, 0);
}
page_collection_unlock(pages);
}

View File

@ -152,8 +152,4 @@ void mttcg_start_vcpu_thread(CPUState *cpu)
qemu_thread_create(cpu->thread, thread_name, mttcg_cpu_thread_fn,
cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
}

View File

@ -329,9 +329,6 @@ void rr_start_vcpu_thread(CPUState *cpu)
single_tcg_halt_cond = cpu->halt_cond;
single_tcg_cpu_thread = cpu->thread;
#ifdef _WIN32
cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
} else {
/* we share the thread */
cpu->thread = single_tcg_cpu_thread;

View File

@ -70,20 +70,10 @@ void tcg_cpus_destroy(CPUState *cpu)
int tcg_cpus_exec(CPUState *cpu)
{
int ret;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
assert(tcg_enabled());
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
cpu_exec_start(cpu);
ret = cpu_exec(cpu);
cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
qatomic_set(&tcg_ctx->prof.cpu_exec_time,
tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
#endif
return ret;
}

View File

@ -64,26 +64,10 @@ DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
* they can set the appropriate CONFIG flags in ${target}-softmmu.mak
*
* Once a guest architecture has been converted to the new primitives
* there are two remaining limitations to check.
*
* there is one remaining limitation to check:
* - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
* - The host must have a stronger memory order than the guest
*
* It may be possible in future to support strong guests on weak hosts
* but that will require tagging all load/stores in a guest with their
* implicit memory order requirements which would likely slow things
* down a lot.
*/
static bool check_tcg_memory_orders_compatible(void)
{
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
#else
return false;
#endif
}
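The check being removed encodes "every ordering the guest requires must also be provided by the host" as a bitmask subset test. With hypothetical values for illustration:

/* guest needs load-load and load-store ordering; host provides all:     */
/*   ((TCG_MO_LD_LD | TCG_MO_LD_ST) & ~TCG_MO_ALL) == 0     -> compatible */
/* a host providing only TCG_MO_LD_LD leaves TCG_MO_LD_ST set -> fails   */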
static bool default_mttcg_enabled(void)
{
@ -96,14 +80,16 @@ static bool default_mttcg_enabled(void)
if (icount_enabled() || TCG_OVERSIZED_GUEST) {
return false;
} else {
}
#ifdef TARGET_SUPPORTS_MTTCG
return check_tcg_memory_orders_compatible();
# ifndef TCG_GUEST_DEFAULT_MO
# error "TARGET_SUPPORTS_MTTCG without TCG_GUEST_DEFAULT_MO"
# endif
return true;
#else
return false;
#endif
}
}
static void tcg_accel_instance_init(Object *obj)
{
@ -170,11 +156,6 @@ static void tcg_set_thread(Object *obj, const char *value, Error **errp)
warn_report("Guest not yet converted to MTTCG - "
"you may get unexpected results");
#endif
if (!check_tcg_memory_orders_compatible()) {
warn_report("Guest expects a stronger memory ordering "
"than the host provides");
error_printf("This may cause strange/hard to debug errors\n");
}
s->mttcg_enabled = true;
}
} else if (strcmp(value, "single") == 0) {

View File

@ -58,7 +58,7 @@ DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG,
DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
i64, env, i64, i64, i64, i32)
#endif
#ifdef CONFIG_CMPXCHG128
#if HAVE_CMPXCHG128
DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG,
i128, env, i64, i128, i128, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG,

View File

@ -759,10 +759,6 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t host_pc)
{
uint64_t data[TARGET_INSN_START_WORDS];
#ifdef CONFIG_PROFILER
TCGProfile *prof = &tcg_ctx->prof;
int64_t ti = profile_getclock();
#endif
int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);
if (insns_left < 0) {
@ -779,12 +775,6 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
}
cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
#ifdef CONFIG_PROFILER
qatomic_set(&prof->restore_time,
prof->restore_time + profile_getclock() - ti);
qatomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
}
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
@ -831,7 +821,7 @@ void page_init(void)
* Return the size of the generated code, or negative on error.
*/
static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
target_ulong pc, void *host_pc,
vaddr pc, void *host_pc,
int *max_insns, int64_t *ti)
{
int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
@ -868,13 +858,6 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
tcg_ctx->cpu = NULL;
*max_insns = tb->icount;
#ifdef CONFIG_PROFILER
qatomic_set(&tcg_ctx->prof.tb_count, tcg_ctx->prof.tb_count + 1);
qatomic_set(&tcg_ctx->prof.interm_time,
tcg_ctx->prof.interm_time + profile_getclock() - *ti);
*ti = profile_getclock();
#endif
return tcg_gen_code(tcg_ctx, tb, pc);
}
@ -1062,17 +1045,14 @@ TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block,
/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong pc, target_ulong cs_base,
vaddr pc, uint64_t cs_base,
uint32_t flags, int cflags)
{
CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb, *existing_tb;
tb_page_addr_t phys_pc;
tb_page_addr_t phys_pc, phys_p2;
tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size, max_insns;
#ifdef CONFIG_PROFILER
TCGProfile *prof = &tcg_ctx->prof;
#endif
int64_t ti;
void *host_pc;
@ -1093,6 +1073,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
buffer_overflow:
assert_no_pages_locked();
tb = tcg_tb_alloc(tcg_ctx);
if (unlikely(!tb)) {
/* flush must be done */
@ -1113,6 +1094,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tb->cflags = cflags;
tb_set_page_addr0(tb, phys_pc);
tb_set_page_addr1(tb, -1);
if (phys_pc != -1) {
tb_lock_page0(phys_pc);
}
tcg_ctx->gen_tb = tb;
tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
#ifdef CONFIG_SOFTMMU
@ -1129,14 +1114,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tcg_ctx->guest_mo = TCG_MO_ALL;
#endif
tb_overflow:
#ifdef CONFIG_PROFILER
/* includes aborted translations because of exceptions */
qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
ti = profile_getclock();
#endif
restart_translate:
trace_translate_block(tb, pc, tb->tc.ptr);
gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
@ -1155,6 +1133,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
"Restarting code generation for "
"code_gen_buffer overflow\n");
tb_unlock_pages(tb);
tcg_ctx->gen_tb = NULL;
goto buffer_overflow;
case -2:
@ -1173,14 +1153,39 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
"Restarting code generation with "
"smaller translation block (max %d insns)\n",
max_insns);
goto tb_overflow;
/*
* The half-sized TB may not cross pages.
* TODO: Fix all targets that cross pages except with
* the first insn, at which point this can't be reached.
*/
phys_p2 = tb_page_addr1(tb);
if (unlikely(phys_p2 != -1)) {
tb_unlock_page1(phys_pc, phys_p2);
tb_set_page_addr1(tb, -1);
}
goto restart_translate;
case -3:
/*
* We had a page lock ordering problem. In order to avoid
* deadlock we had to drop the lock on page0, which means
* that everything we translated so far is compromised.
* Restart with locks held on both pages.
*/
qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
"Restarting code generation with re-locked pages");
goto restart_translate;
default:
g_assert_not_reached();
}
}
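These negative codes drive a restart protocol anchored at the sigsetjmp in setjmp_gen_code, with tb_lock_page1 earlier in this patch supplying the -3 via siglongjmp. Its generic shape, as a standalone sketch:

#include <setjmp.h>

static sigjmp_buf jmp;

static int translate_once(void)
{
    int ret = sigsetjmp(jmp, 0);
    if (ret != 0) {
        /* negative code from siglongjmp: buffer overflow, TB too
           large (-2), or page-lock inversion (-3), as used here */
        return ret;
    }
    /* ... generate code; on trouble: siglongjmp(jmp, -3); ... */
    return 0;
}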
tcg_ctx->gen_tb = NULL;
search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
if (unlikely(search_size < 0)) {
tb_unlock_pages(tb);
goto buffer_overflow;
}
tb->tc.size = gen_code_size;
@ -1199,13 +1204,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
*/
perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
#ifdef CONFIG_PROFILER
qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
qemu_log_in_addr_range(pc)) {
FILE *logfile = qemu_log_trylock();
@ -1305,6 +1303,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
* before attempting to link to other TBs or add to the lookup table.
*/
if (tb_page_addr0(tb) == -1) {
assert_no_pages_locked();
return tb;
}
@ -1319,7 +1318,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
* No explicit memory barrier is required -- tb_link_page() makes the
* TB visible in a consistent state.
*/
existing_tb = tb_link_page(tb, tb_page_addr0(tb), tb_page_addr1(tb));
existing_tb = tb_link_page(tb);
assert_no_pages_locked();
/* if the TB already exists, discard what we just translated */
if (unlikely(existing_tb != tb)) {
uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
@ -1348,7 +1349,8 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
/* The exception probably happened in a helper. The CPU state should
have been saved before calling it. Fetch the PC from there. */
CPUArchState *env = cpu->env_ptr;
target_ulong pc, cs_base;
vaddr pc;
uint64_t cs_base;
tb_page_addr_t addr;
uint32_t flags;
@ -1402,10 +1404,10 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
target_ulong pc = log_pc(cpu, tb);
vaddr pc = log_pc(cpu, tb);
if (qemu_log_in_addr_range(pc)) {
qemu_log("cpu_io_recompile: rewound execution of TB to "
TARGET_FMT_lx "\n", pc);
qemu_log("cpu_io_recompile: rewound execution of TB to %016"
VADDR_PRIx "\n", pc);
}
}

View File

@ -12,9 +12,9 @@
#include "qemu/error-report.h"
#include "exec/exec-all.h"
#include "exec/translator.h"
#include "exec/translate-all.h"
#include "exec/plugin-gen.h"
#include "tcg/tcg-op-common.h"
#include "internal.h"
static void gen_io_start(void)
{
@ -160,7 +160,7 @@ extern struct libafl_backdoor_hook* libafl_backdoor_hooks;
//// --- End LibAFL code ---
bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
bool translator_use_goto_tb(DisasContextBase *db, vaddr dest)
{
/* Suppress goto_tb if requested. */
if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
@ -172,8 +172,8 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
}
void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc,
const TranslatorOps *ops, DisasContextBase *db)
vaddr pc, void *host_pc, const TranslatorOps *ops,
DisasContextBase *db)
{
uint32_t cflags = tb_cflags(tb);
TCGOp *icount_start_insn;
@ -190,10 +190,6 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
db->host_addr[0] = host_pc;
db->host_addr[1] = NULL;
#ifdef CONFIG_USER_ONLY
page_protect(pc);
#endif
ops->init_disas_context(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
@ -352,10 +348,10 @@ post_translate_insn:
}
static void *translator_access(CPUArchState *env, DisasContextBase *db,
target_ulong pc, size_t len)
vaddr pc, size_t len)
{
void *host;
target_ulong base, end;
vaddr base, end;
TranslationBlock *tb;
tb = db->tb;
@ -373,22 +369,36 @@ static void *translator_access(CPUArchState *env, DisasContextBase *db,
host = db->host_addr[1];
base = TARGET_PAGE_ALIGN(db->pc_first);
if (host == NULL) {
tb_page_addr_t phys_page =
get_page_addr_code_hostp(env, base, &db->host_addr[1]);
tb_page_addr_t page0, old_page1, new_page1;
new_page1 = get_page_addr_code_hostp(env, base, &db->host_addr[1]);
/*
* If the second page is MMIO, treat as if the first page
* was MMIO as well, so that we do not cache the TB.
*/
if (unlikely(phys_page == -1)) {
if (unlikely(new_page1 == -1)) {
tb_unlock_pages(tb);
tb_set_page_addr0(tb, -1);
return NULL;
}
tb_set_page_addr1(tb, phys_page);
#ifdef CONFIG_USER_ONLY
page_protect(end);
#endif
/*
* If this is not the first time around, and page1 matches,
* then we already have the page locked. Alternately, we're
* not doing anything to prevent the PTE from changing, so
* we might wind up with a different page, requiring us to
* re-do the locking.
*/
old_page1 = tb_page_addr1(tb);
if (likely(new_page1 != old_page1)) {
page0 = tb_page_addr0(tb);
if (unlikely(old_page1 != -1)) {
tb_unlock_page1(page0, old_page1);
}
tb_set_page_addr1(tb, new_page1);
tb_lock_page1(page0, new_page1);
}
host = db->host_addr[1];
}

View File

@ -144,7 +144,7 @@ typedef struct PageFlagsNode {
static IntervalTreeRoot pageflags_root;
static PageFlagsNode *pageflags_find(target_ulong start, target_long last)
static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
{
IntervalTreeNode *n;
@ -153,7 +153,7 @@ static PageFlagsNode *pageflags_find(target_ulong start, target_long last)
}
static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
target_long last)
target_ulong last)
{
IntervalTreeNode *n;
@ -520,19 +520,19 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
}
}
int page_check_range(target_ulong start, target_ulong len, int flags)
bool page_check_range(target_ulong start, target_ulong len, int flags)
{
target_ulong last;
int locked; /* tri-state: =0: unlocked, +1: global, -1: local */
int ret;
bool ret;
if (len == 0) {
return 0; /* trivial length */
return true; /* trivial length */
}
last = start + len - 1;
if (last < start) {
return -1; /* wrap around */
return false; /* wrap around */
}
locked = have_mmap_lock();
@ -551,33 +551,33 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
p = pageflags_find(start, last);
}
if (!p) {
ret = -1; /* entire region invalid */
ret = false; /* entire region invalid */
break;
}
}
if (start < p->itree.start) {
ret = -1; /* initial bytes invalid */
ret = false; /* initial bytes invalid */
break;
}
missing = flags & ~p->flags;
if (missing & PAGE_READ) {
ret = -1; /* page not readable */
if (missing & ~PAGE_WRITE) {
ret = false; /* page doesn't match */
break;
}
if (missing & PAGE_WRITE) {
if (!(p->flags & PAGE_WRITE_ORG)) {
ret = -1; /* page not writable */
ret = false; /* page not writable */
break;
}
/* Asking about writable, but has been protected: undo. */
if (!page_unprotect(start, 0)) {
ret = -1;
ret = false;
break;
}
/* TODO: page_unprotect should take a range, not a single page. */
if (last - start < TARGET_PAGE_SIZE) {
ret = 0; /* ok */
ret = true; /* ok */
break;
}
start += TARGET_PAGE_SIZE;
@ -585,7 +585,7 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
}
if (last <= p->itree.last) {
ret = 0; /* ok */
ret = true; /* ok */
break;
}
start = p->itree.last + 1;
@ -598,6 +598,54 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
return ret;
}
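With the int-to-bool conversion, callers test the result directly instead of comparing against 0/-1; the 16-byte atomic path earlier in this patch does exactly this with PAGE_WRITE_ORG. A hypothetical call site:

if (!page_check_range(addr, len, PAGE_READ | PAGE_WRITE)) {
    /* region unmapped or lacking permissions: fail the access */
    return -EFAULT;
}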
bool page_check_range_empty(target_ulong start, target_ulong last)
{
assert(last >= start);
assert_memory_lock();
return pageflags_find(start, last) == NULL;
}
target_ulong page_find_range_empty(target_ulong min, target_ulong max,
target_ulong len, target_ulong align)
{
target_ulong len_m1, align_m1;
assert(min <= max);
assert(max <= GUEST_ADDR_MAX);
assert(len != 0);
assert(is_power_of_2(align));
assert_memory_lock();
len_m1 = len - 1;
align_m1 = align - 1;
/* Iteratively narrow the search region. */
while (1) {
PageFlagsNode *p;
/* Align min and double-check there's enough space remaining. */
min = (min + align_m1) & ~align_m1;
if (min > max) {
return -1;
}
if (len_m1 > max - min) {
return -1;
}
p = pageflags_find(min, min + len_m1);
if (p == NULL) {
/* Found! */
return min;
}
if (max <= p->itree.last) {
/* Existing allocation fills the remainder of the search region. */
return -1;
}
/* Skip across existing allocation. */
min = p->itree.last + 1;
}
}
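A usage sketch for the new helper (hypothetical arguments): reserve a page-aligned, 16-page hole anywhere in the guest address space:

target_ulong addr = page_find_range_empty(0, GUEST_ADDR_MAX,
                                          16 * TARGET_PAGE_SIZE,
                                          TARGET_PAGE_SIZE);
if (addr == (target_ulong)-1) {
    /* no free range large enough */
}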
void page_protect(tb_page_addr_t address)
{
PageFlagsNode *p;
@ -721,7 +769,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
return current_tb_invalidated ? 2 : 1;
}
static int probe_access_internal(CPUArchState *env, target_ulong addr,
static int probe_access_internal(CPUArchState *env, vaddr addr,
int fault_size, MMUAccessType access_type,
bool nonfault, uintptr_t ra)
{
@ -745,6 +793,10 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
if (guest_addr_valid_untagged(addr)) {
int page_flags = page_get_flags(addr);
if (page_flags & acc_flag) {
if ((acc_flag == PAGE_READ || acc_flag == PAGE_WRITE)
&& cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
return TLB_MMIO;
}
return 0; /* success */
}
maperr = !(page_flags & PAGE_VALID);
@ -759,7 +811,7 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}
int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t ra)
{
@ -767,23 +819,23 @@ int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
*phost = flags ? NULL : g2h(env_cpu(env), addr);
*phost = (flags & TLB_INVALID_MASK) ? NULL : g2h(env_cpu(env), addr);
return flags;
}
void *probe_access(CPUArchState *env, target_ulong addr, int size,
void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
int flags;
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
flags = probe_access_internal(env, addr, size, access_type, false, ra);
g_assert(flags == 0);
g_assert((flags & ~TLB_MMIO) == 0);
return size ? g2h(env_cpu(env), addr) : NULL;
}
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
void **hostp)
{
int flags;
@ -889,7 +941,7 @@ void page_reset_target_data(target_ulong start, target_ulong last) { }
/* The softmmu versions of these helpers are in cputlb.c. */
static void *cpu_mmu_lookup(CPUArchState *env, abi_ptr addr,
static void *cpu_mmu_lookup(CPUArchState *env, vaddr addr,
MemOp mop, uintptr_t ra, MMUAccessType type)
{
int a_bits = get_alignment_bits(mop);
@ -914,6 +966,7 @@ static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr,
uint8_t ret;
tcg_debug_assert((mop & MO_SIZE) == MO_8);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = ldub_p(haddr);
clear_helper_retaddr();
@ -947,6 +1000,7 @@ static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
uint16_t ret;
tcg_debug_assert((mop & MO_SIZE) == MO_16);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_2(env, ra, haddr, mop);
clear_helper_retaddr();
@ -984,6 +1038,7 @@ static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
uint32_t ret;
tcg_debug_assert((mop & MO_SIZE) == MO_32);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_4(env, ra, haddr, mop);
clear_helper_retaddr();
@ -1021,6 +1076,7 @@ static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
uint64_t ret;
tcg_debug_assert((mop & MO_SIZE) == MO_64);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_8(env, ra, haddr, mop);
clear_helper_retaddr();
@ -1052,6 +1108,7 @@ static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
Int128 ret;
tcg_debug_assert((mop & MO_SIZE) == MO_128);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_16(env, ra, haddr, mop);
clear_helper_retaddr();
@ -1087,6 +1144,7 @@ static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_8);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
stb_p(haddr, val);
clear_helper_retaddr();
@ -1111,6 +1169,7 @@ static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_16);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
@ -1139,6 +1198,7 @@ static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_32);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
@ -1167,6 +1227,7 @@ static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_64);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
@ -1195,6 +1256,7 @@ static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_128);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
@ -1324,8 +1386,8 @@ uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
/*
* Do not allow unaligned operations to proceed. Return the host address.
*/
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
MemOpIdx oi, int size, uintptr_t retaddr)
static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
int size, uintptr_t retaddr)
{
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
@ -1371,7 +1433,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
#include "atomic_template.h"
#endif
#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

View File

@ -29,7 +29,11 @@
#include "qemu/timer.h"
#include "qemu/dbus.h"
#ifdef G_OS_UNIX
#include <gio/gunixfdlist.h>
#endif
#include "ui/dbus.h"
#include "ui/dbus-display1.h"
#define AUDIO_CAP "dbus"
@ -444,7 +448,9 @@ listener_in_vanished_cb(GDBusConnection *connection,
static gboolean
dbus_audio_register_listener(AudioState *s,
GDBusMethodInvocation *invocation,
#ifdef G_OS_UNIX
GUnixFDList *fd_list,
#endif
GVariant *arg_listener,
bool out)
{
@ -471,6 +477,11 @@ dbus_audio_register_listener(AudioState *s,
return DBUS_METHOD_INVOCATION_HANDLED;
}
#ifdef G_OS_WIN32
if (!dbus_win32_import_socket(invocation, arg_listener, &fd)) {
return DBUS_METHOD_INVOCATION_HANDLED;
}
#else
fd = g_unix_fd_list_get(fd_list, g_variant_get_handle(arg_listener), &err);
if (err) {
g_dbus_method_invocation_return_error(invocation,
@ -480,6 +491,7 @@ dbus_audio_register_listener(AudioState *s,
err->message);
return DBUS_METHOD_INVOCATION_HANDLED;
}
#endif
socket = g_socket_new_from_fd(fd, &err);
if (err) {
@ -488,15 +500,28 @@ dbus_audio_register_listener(AudioState *s,
DBUS_DISPLAY_ERROR_FAILED,
"Couldn't make a socket: %s",
err->message);
#ifdef G_OS_WIN32
closesocket(fd);
#else
close(fd);
#endif
return DBUS_METHOD_INVOCATION_HANDLED;
}
socket_conn = g_socket_connection_factory_create_connection(socket);
if (out) {
qemu_dbus_display1_audio_complete_register_out_listener(
da->iface, invocation, NULL);
da->iface, invocation
#ifdef G_OS_UNIX
, NULL
#endif
);
} else {
qemu_dbus_display1_audio_complete_register_in_listener(
da->iface, invocation, NULL);
da->iface, invocation
#ifdef G_OS_UNIX
, NULL
#endif
);
}
listener_conn =
@ -574,22 +599,32 @@ dbus_audio_register_listener(AudioState *s,
static gboolean
dbus_audio_register_out_listener(AudioState *s,
GDBusMethodInvocation *invocation,
#ifdef G_OS_UNIX
GUnixFDList *fd_list,
#endif
GVariant *arg_listener)
{
return dbus_audio_register_listener(s, invocation,
fd_list, arg_listener, true);
#ifdef G_OS_UNIX
fd_list,
#endif
arg_listener, true);
}
static gboolean
dbus_audio_register_in_listener(AudioState *s,
GDBusMethodInvocation *invocation,
#ifdef G_OS_UNIX
GUnixFDList *fd_list,
#endif
GVariant *arg_listener)
{
return dbus_audio_register_listener(s, invocation,
fd_list, arg_listener, false);
#ifdef G_OS_UNIX
fd_list,
#endif
arg_listener, false);
}
static void

View File

@ -31,7 +31,7 @@ endforeach
if dbus_display
module_ss = ss.source_set()
module_ss.add(when: gio, if_true: files('dbusaudio.c'))
module_ss.add(when: [gio, pixman], if_true: files('dbusaudio.c'))
audio_modules += {'dbus': module_ss}
endif

View File

@ -1,5 +1,5 @@
/*
* QEMU Pipewire audio driver
* QEMU PipeWire audio driver
*
* Copyright (c) 2023 Red Hat Inc.
*
@ -66,6 +66,9 @@ typedef struct PWVoiceIn {
PWVoice v;
} PWVoiceIn;
#define PW_VOICE_IN(v) ((PWVoiceIn *)v)
#define PW_VOICE_OUT(v) ((PWVoiceOut *)v)
static void
stream_destroy(void *data)
{
@ -197,16 +200,6 @@ on_stream_state_changed(void *data, enum pw_stream_state old,
trace_pw_state_changed(pw_stream_get_node_id(v->stream),
pw_stream_state_as_string(state));
switch (state) {
case PW_STREAM_STATE_ERROR:
case PW_STREAM_STATE_UNCONNECTED:
break;
case PW_STREAM_STATE_PAUSED:
case PW_STREAM_STATE_CONNECTING:
case PW_STREAM_STATE_STREAMING:
break;
}
}
static const struct pw_stream_events capture_stream_events = {
@ -424,7 +417,7 @@ pw_to_audfmt(enum spa_audio_format fmt, int *endianness,
}
static int
create_stream(pwaudio *c, PWVoice *v, const char *stream_name,
qpw_stream_new(pwaudio *c, PWVoice *v, const char *stream_name,
const char *name, enum spa_direction dir)
{
int res;
@ -436,6 +429,10 @@ create_stream(pwaudio *c, PWVoice *v, const char *stream_name,
struct pw_properties *props;
props = pw_properties_new(NULL, NULL);
if (!props) {
error_report("Failed to create PW properties: %s", g_strerror(errno));
return -1;
}
/* 75% of the timer period for faster updates */
buf_samples = (uint64_t)v->g->dev->timer_period * v->info.rate
@ -448,8 +445,8 @@ create_stream(pwaudio *c, PWVoice *v, const char *stream_name,
pw_properties_set(props, PW_KEY_TARGET_OBJECT, name);
}
v->stream = pw_stream_new(c->core, stream_name, props);
if (v->stream == NULL) {
error_report("Failed to create PW stream: %s", g_strerror(errno));
return -1;
}
@ -477,6 +474,7 @@ create_stream(pwaudio *c, PWVoice *v, const char *stream_name,
PW_STREAM_FLAG_MAP_BUFFERS |
PW_STREAM_FLAG_RT_PROCESS, params, n_params);
if (res < 0) {
error_report("Failed to connect PW stream: %s", g_strerror(errno));
pw_stream_destroy(v->stream);
return -1;
}
@ -484,71 +482,37 @@ create_stream(pwaudio *c, PWVoice *v, const char *stream_name,
return 0;
}
static int
qpw_stream_new(pwaudio *c, PWVoice *v, const char *stream_name,
const char *name, enum spa_direction dir)
static void
qpw_set_position(uint32_t channels, uint32_t position[SPA_AUDIO_MAX_CHANNELS])
{
int r;
switch (v->info.channels) {
memcpy(position, (uint32_t[SPA_AUDIO_MAX_CHANNELS]) { SPA_AUDIO_CHANNEL_UNKNOWN, },
sizeof(uint32_t) * SPA_AUDIO_MAX_CHANNELS);
/*
* TODO: This currently expects that the only frontend supporting more
* than two channels is usb-audio. We will need some means to set channel
* order when a new frontend gains multi-channel support.
*/
switch (channels) {
case 8:
v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
v->info.position[2] = SPA_AUDIO_CHANNEL_FC;
v->info.position[3] = SPA_AUDIO_CHANNEL_LFE;
v->info.position[4] = SPA_AUDIO_CHANNEL_RL;
v->info.position[5] = SPA_AUDIO_CHANNEL_RR;
v->info.position[6] = SPA_AUDIO_CHANNEL_SL;
v->info.position[7] = SPA_AUDIO_CHANNEL_SR;
break;
position[6] = SPA_AUDIO_CHANNEL_SL;
position[7] = SPA_AUDIO_CHANNEL_SR;
/* fallthrough */
case 6:
v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
v->info.position[2] = SPA_AUDIO_CHANNEL_FC;
v->info.position[3] = SPA_AUDIO_CHANNEL_LFE;
v->info.position[4] = SPA_AUDIO_CHANNEL_RL;
v->info.position[5] = SPA_AUDIO_CHANNEL_RR;
break;
case 5:
v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
v->info.position[2] = SPA_AUDIO_CHANNEL_FC;
v->info.position[3] = SPA_AUDIO_CHANNEL_LFE;
v->info.position[4] = SPA_AUDIO_CHANNEL_RC;
break;
case 4:
v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
v->info.position[2] = SPA_AUDIO_CHANNEL_FC;
v->info.position[3] = SPA_AUDIO_CHANNEL_RC;
break;
case 3:
v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
v->info.position[2] = SPA_AUDIO_CHANNEL_LFE;
break;
position[2] = SPA_AUDIO_CHANNEL_FC;
position[3] = SPA_AUDIO_CHANNEL_LFE;
position[4] = SPA_AUDIO_CHANNEL_RL;
position[5] = SPA_AUDIO_CHANNEL_RR;
/* fallthrough */
case 2:
v->info.position[0] = SPA_AUDIO_CHANNEL_FL;
v->info.position[1] = SPA_AUDIO_CHANNEL_FR;
position[0] = SPA_AUDIO_CHANNEL_FL;
position[1] = SPA_AUDIO_CHANNEL_FR;
break;
case 1:
v->info.position[0] = SPA_AUDIO_CHANNEL_MONO;
position[0] = SPA_AUDIO_CHANNEL_MONO;
break;
default:
for (size_t i = 0; i < v->info.channels; i++) {
v->info.position[i] = SPA_AUDIO_CHANNEL_UNKNOWN;
dolog("Internal error: unsupported channel count %d\n", channels);
}
break;
}
/* create a new unconnected pwstream */
r = create_stream(c, v, stream_name, name, dir);
if (r < 0) {
AUD_log(AUDIO_CAP, "Failed to create stream.");
return -1;
}
return r;
}
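A usage sketch for the new helper (per the cases above): a 2-channel stream gets FL/FR, and every remaining slot keeps SPA_AUDIO_CHANNEL_UNKNOWN from the initial memcpy:

uint32_t pos[SPA_AUDIO_MAX_CHANNELS];
qpw_set_position(2, pos);   /* pos[0] = FL, pos[1] = FR */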
static int
@ -566,6 +530,7 @@ qpw_init_out(HWVoiceOut *hw, struct audsettings *as, void *drv_opaque)
v->info.format = audfmt_to_pw(as->fmt, as->endianness);
v->info.channels = as->nchannels;
qpw_set_position(as->nchannels, v->info.position);
v->info.rate = as->freq;
obt_as.fmt =
@ -579,7 +544,6 @@ qpw_init_out(HWVoiceOut *hw, struct audsettings *as, void *drv_opaque)
r = qpw_stream_new(c, v, ppdo->stream_name ? : c->dev->id,
ppdo->name, SPA_DIRECTION_OUTPUT);
if (r < 0) {
error_report("qpw_stream_new for playback failed");
pw_thread_loop_unlock(c->thread_loop);
return -1;
}
@ -613,6 +577,7 @@ qpw_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
v->info.format = audfmt_to_pw(as->fmt, as->endianness);
v->info.channels = as->nchannels;
qpw_set_position(as->nchannels, v->info.position);
v->info.rate = as->freq;
obt_as.fmt =
@ -623,7 +588,6 @@ qpw_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
r = qpw_stream_new(c, v, ppdo->stream_name ? : c->dev->id,
ppdo->name, SPA_DIRECTION_INPUT);
if (r < 0) {
error_report("qpw_stream_new for recording failed");
pw_thread_loop_unlock(c->thread_loop);
return -1;
}
@ -640,105 +604,85 @@ qpw_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
}
static void
qpw_fini_out(HWVoiceOut *hw)
qpw_voice_fini(PWVoice *v)
{
PWVoiceOut *pw = (PWVoiceOut *) hw;
PWVoice *v = &pw->v;
if (v->stream) {
pwaudio *c = v->g;
if (!v->stream) {
return;
}
pw_thread_loop_lock(c->thread_loop);
pw_stream_destroy(v->stream);
v->stream = NULL;
pw_thread_loop_unlock(c->thread_loop);
}
static void
qpw_fini_out(HWVoiceOut *hw)
{
qpw_voice_fini(&PW_VOICE_OUT(hw)->v);
}
static void
qpw_fini_in(HWVoiceIn *hw)
{
PWVoiceIn *pw = (PWVoiceIn *) hw;
PWVoice *v = &pw->v;
qpw_voice_fini(&PW_VOICE_IN(hw)->v);
}
if (v->stream) {
static void
qpw_voice_set_enabled(PWVoice *v, bool enable)
{
pwaudio *c = v->g;
pw_thread_loop_lock(c->thread_loop);
pw_stream_destroy(v->stream);
v->stream = NULL;
pw_stream_set_active(v->stream, enable);
pw_thread_loop_unlock(c->thread_loop);
}
}
static void
qpw_enable_out(HWVoiceOut *hw, bool enable)
{
PWVoiceOut *po = (PWVoiceOut *) hw;
PWVoice *v = &po->v;
pwaudio *c = v->g;
pw_thread_loop_lock(c->thread_loop);
pw_stream_set_active(v->stream, enable);
pw_thread_loop_unlock(c->thread_loop);
qpw_voice_set_enabled(&PW_VOICE_OUT(hw)->v, enable);
}
static void
qpw_enable_in(HWVoiceIn *hw, bool enable)
{
PWVoiceIn *pi = (PWVoiceIn *) hw;
PWVoice *v = &pi->v;
qpw_voice_set_enabled(&PW_VOICE_IN(hw)->v, enable);
}
static void
qpw_voice_set_volume(PWVoice *v, Volume *vol)
{
pwaudio *c = v->g;
int i, ret;
pw_thread_loop_lock(c->thread_loop);
pw_stream_set_active(v->stream, enable);
v->volume.channels = vol->channels;
for (i = 0; i < vol->channels; ++i) {
v->volume.values[i] = (float)vol->vol[i] / 255;
}
ret = pw_stream_set_control(v->stream,
SPA_PROP_channelVolumes, v->volume.channels, v->volume.values, 0);
trace_pw_vol(ret == 0 ? "success" : "failed");
v->muted = vol->mute;
float val = v->muted ? 1.f : 0.f;
ret = pw_stream_set_control(v->stream, SPA_PROP_mute, 1, &val, 0);
pw_thread_loop_unlock(c->thread_loop);
}
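A caller-side sketch with hypothetical values (Volume field layout assumed from the usage above): half volume, unmuted, stereo:

Volume vol = { .mute = false, .channels = 2, .vol = { 128, 128 } };
qpw_voice_set_volume(v, &vol);   /* values[i] = 128 / 255 ≈ 0.502 */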
static void
qpw_volume_out(HWVoiceOut *hw, Volume *vol)
{
PWVoiceOut *pw = (PWVoiceOut *) hw;
PWVoice *v = &pw->v;
pwaudio *c = v->g;
int i, ret;
pw_thread_loop_lock(c->thread_loop);
v->volume.channels = vol->channels;
for (i = 0; i < vol->channels; ++i) {
v->volume.values[i] = (float)vol->vol[i] / 255;
}
ret = pw_stream_set_control(v->stream,
SPA_PROP_channelVolumes, v->volume.channels, v->volume.values, 0);
trace_pw_vol(ret == 0 ? "success" : "failed");
v->muted = vol->mute;
float val = v->muted ? 1.f : 0.f;
ret = pw_stream_set_control(v->stream, SPA_PROP_mute, 1, &val, 0);
pw_thread_loop_unlock(c->thread_loop);
qpw_voice_set_volume(&PW_VOICE_OUT(hw)->v, vol);
}
static void
qpw_volume_in(HWVoiceIn *hw, Volume *vol)
{
PWVoiceIn *pw = (PWVoiceIn *) hw;
PWVoice *v = &pw->v;
pwaudio *c = v->g;
int i, ret;
pw_thread_loop_lock(c->thread_loop);
v->volume.channels = vol->channels;
for (i = 0; i < vol->channels; ++i) {
v->volume.values[i] = (float)vol->vol[i] / 255;
}
ret = pw_stream_set_control(v->stream,
SPA_PROP_channelVolumes, v->volume.channels, v->volume.values, 0);
trace_pw_vol(ret == 0 ? "success" : "failed");
v->muted = vol->mute;
float val = v->muted ? 1.f : 0.f;
ret = pw_stream_set_control(v->stream, SPA_PROP_mute, 1, &val, 0);
pw_thread_loop_unlock(c->thread_loop);
qpw_voice_set_volume(&PW_VOICE_IN(hw)->v, vol);
}
static int wait_resync(pwaudio *pw)
@ -760,6 +704,7 @@ static int wait_resync(pwaudio *pw)
}
return 0;
}
static void
on_core_error(void *data, uint32_t id, int seq, int res, const char *message)
{
@ -794,27 +739,28 @@ static void *
qpw_audio_init(Audiodev *dev)
{
g_autofree pwaudio *pw = g_new0(pwaudio, 1);
assert(dev->driver == AUDIODEV_DRIVER_PIPEWIRE);
trace_pw_audio_init();
pw_init(NULL, NULL);
trace_pw_audio_init();
assert(dev->driver == AUDIODEV_DRIVER_PIPEWIRE);
pw->dev = dev;
pw->thread_loop = pw_thread_loop_new("Pipewire thread loop", NULL);
pw->thread_loop = pw_thread_loop_new("PipeWire thread loop", NULL);
if (pw->thread_loop == NULL) {
error_report("Could not create Pipewire loop");
error_report("Could not create PipeWire loop: %s", g_strerror(errno));
goto fail;
}
pw->context =
pw_context_new(pw_thread_loop_get_loop(pw->thread_loop), NULL, 0);
if (pw->context == NULL) {
error_report("Could not create Pipewire context");
error_report("Could not create PipeWire context: %s", g_strerror(errno));
goto fail;
}
if (pw_thread_loop_start(pw->thread_loop) < 0) {
error_report("Could not start Pipewire loop");
error_report("Could not start PipeWire loop: %s", g_strerror(errno));
goto fail;
}
@ -844,12 +790,8 @@ fail:
if (pw->thread_loop) {
pw_thread_loop_stop(pw->thread_loop);
}
if (pw->context) {
g_clear_pointer(&pw->context, pw_context_destroy);
}
if (pw->thread_loop) {
g_clear_pointer(&pw->thread_loop, pw_thread_loop_destroy);
}
return NULL;
}

View File

@ -24,7 +24,7 @@ pw_read(int32_t avail, uint32_t index, size_t len) "avail=%d index=%u len=%zu"
pw_write(int32_t filled, int32_t avail, uint32_t index, size_t len) "filled=%d avail=%d index=%u len=%zu"
pw_vol(const char *ret) "set volume: %s"
pw_period(uint64_t quantum, uint32_t rate) "period =%" PRIu64 "/%u"
pw_audio_init(void) "Initialize Pipewire context"
pw_audio_init(void) "Initialize PipeWire context"
# audio.c
audio_timer_start(int interval) "interval %d ms"

View File

@ -232,9 +232,9 @@ static void cryptodev_vhost_user_init(
backend->conf.max_auth_key_len = VHOST_USER_MAX_AUTH_KEY_LEN;
}
static int64_t cryptodev_vhost_user_sym_create_session(
static int64_t cryptodev_vhost_user_crypto_create_session(
CryptoDevBackend *backend,
CryptoDevBackendSymSessionInfo *sess_info,
CryptoDevBackendSessionInfo *sess_info,
uint32_t queue_index, Error **errp)
{
CryptoDevBackendClient *cc =
@ -266,18 +266,17 @@ static int cryptodev_vhost_user_create_session(
void *opaque)
{
uint32_t op_code = sess_info->op_code;
CryptoDevBackendSymSessionInfo *sym_sess_info;
int64_t ret;
Error *local_error = NULL;
int status;
switch (op_code) {
case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION:
case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
sym_sess_info = &sess_info->u.sym_sess_info;
ret = cryptodev_vhost_user_sym_create_session(backend, sym_sess_info,
ret = cryptodev_vhost_user_crypto_create_session(backend, sess_info,
queue_index, &local_error);
break;

View File

@ -191,6 +191,11 @@ static int cryptodev_backend_account(CryptoDevBackend *backend,
if (algtype == QCRYPTODEV_BACKEND_ALG_ASYM) {
CryptoDevBackendAsymOpInfo *asym_op_info = op_info->u.asym_op_info;
len = asym_op_info->src_len;
if (unlikely(!backend->asym_stat)) {
error_report("cryptodev: Unexpected asym operation");
return -VIRTIO_CRYPTO_NOTSUPP;
}
switch (op_info->op_code) {
case VIRTIO_CRYPTO_AKCIPHER_ENCRYPT:
CryptodevAsymStatIncEncrypt(backend, len);
@ -210,6 +215,11 @@ static int cryptodev_backend_account(CryptoDevBackend *backend,
} else if (algtype == QCRYPTODEV_BACKEND_ALG_SYM) {
CryptoDevBackendSymOpInfo *sym_op_info = op_info->u.sym_op_info;
len = sym_op_info->src_len;
if (unlikely(!backend->sym_stat)) {
error_report("cryptodev: Unexpected sym operation");
return -VIRTIO_CRYPTO_NOTSUPP;
}
switch (op_info->op_code) {
case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
CryptodevSymStatIncEncrypt(backend, len);
@ -522,7 +532,7 @@ static int cryptodev_backend_stats_query(Object *obj, void *data)
entry = g_new0(StatsResult, 1);
entry->provider = STATS_PROVIDER_CRYPTODEV;
entry->qom_path = g_strdup(object_get_canonical_path(obj));
entry->qom_path = object_get_canonical_path(obj);
entry->stats = stats_list;
QAPI_LIST_PREPEND(*stats_results, entry);

block.c
View File

@ -555,8 +555,9 @@ int coroutine_fn bdrv_co_create(BlockDriver *drv, const char *filename,
* On success, return @blk's actual length.
* Otherwise, return -errno.
*/
static int64_t create_file_fallback_truncate(BlockBackend *blk,
int64_t minimum_size, Error **errp)
static int64_t coroutine_fn GRAPH_UNLOCKED
create_file_fallback_truncate(BlockBackend *blk, int64_t minimum_size,
Error **errp)
{
Error *local_err = NULL;
int64_t size;
@ -564,14 +565,14 @@ static int64_t create_file_fallback_truncate(BlockBackend *blk,
GLOBAL_STATE_CODE();
ret = blk_truncate(blk, minimum_size, false, PREALLOC_MODE_OFF, 0,
ret = blk_co_truncate(blk, minimum_size, false, PREALLOC_MODE_OFF, 0,
&local_err);
if (ret < 0 && ret != -ENOTSUP) {
error_propagate(errp, local_err);
return ret;
}
size = blk_getlength(blk);
size = blk_co_getlength(blk);
if (size < 0) {
error_free(local_err);
error_setg_errno(errp, -size,
@ -2854,7 +2855,7 @@ uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm)
* Replaces the node that a BdrvChild points to without updating permissions.
*
* If @new_bs is non-NULL, the parent of @child must already be drained through
* @child.
* @child and the caller must hold the AioContext lock for @new_bs.
*/
static void bdrv_replace_child_noperm(BdrvChild *child,
BlockDriverState *new_bs)
@ -2893,7 +2894,7 @@ static void bdrv_replace_child_noperm(BdrvChild *child,
}
/* TODO Pull this up into the callers to avoid polling here */
bdrv_graph_wrlock();
bdrv_graph_wrlock(new_bs);
if (old_bs) {
if (child->klass->detach) {
child->klass->detach(child);
@ -2989,6 +2990,10 @@ static TransactionActionDrv bdrv_attach_child_common_drv = {
* Function doesn't update permissions, caller is responsible for this.
*
* Returns new created child.
*
* The caller must hold the AioContext lock for @child_bs. Both @parent_bs and
* @child_bs can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs,
const char *child_name,
@ -2999,7 +3004,7 @@ static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs,
Transaction *tran, Error **errp)
{
BdrvChild *new_child;
AioContext *parent_ctx;
AioContext *parent_ctx, *new_child_ctx;
AioContext *child_ctx = bdrv_get_aio_context(child_bs);
assert(child_class->get_parent_desc);
@ -3050,6 +3055,12 @@ static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs,
}
}
new_child_ctx = bdrv_get_aio_context(child_bs);
if (new_child_ctx != child_ctx) {
aio_context_release(child_ctx);
aio_context_acquire(new_child_ctx);
}
bdrv_ref(child_bs);
/*
* Let every new BdrvChild start with a drained parent. Inserting the child
@ -3079,11 +3090,20 @@ static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs,
};
tran_add(tran, &bdrv_attach_child_common_drv, s);
if (new_child_ctx != child_ctx) {
aio_context_release(new_child_ctx);
aio_context_acquire(child_ctx);
}
return new_child;
}
/*
* Function doesn't update permissions, caller is responsible for this.
*
* The caller must hold the AioContext lock for @child_bs. Both @parent_bs and
* @child_bs can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
static BdrvChild *bdrv_attach_child_noperm(BlockDriverState *parent_bs,
BlockDriverState *child_bs,
@ -3347,6 +3367,10 @@ static BdrvChildRole bdrv_backing_role(BlockDriverState *bs)
* callers which don't need their own reference any more must call bdrv_unref().
*
* Function doesn't update permissions, caller is responsible for this.
*
* The caller must hold the AioContext lock for @child_bs. Both @parent_bs and
* @child_bs can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
static int bdrv_set_file_or_backing_noperm(BlockDriverState *parent_bs,
BlockDriverState *child_bs,
@ -3435,6 +3459,11 @@ out:
return 0;
}
/*
* The caller must hold the AioContext lock for @backing_hd. Both @bs and
* @backing_hd can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
static int bdrv_set_backing_noperm(BlockDriverState *bs,
BlockDriverState *backing_hd,
Transaction *tran, Error **errp)
@ -3498,6 +3527,7 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
int ret = 0;
bool implicit_backing = false;
BlockDriverState *backing_hd;
AioContext *backing_hd_ctx;
QDict *options;
QDict *tmp_parent_options = NULL;
Error *local_err = NULL;
@ -3582,8 +3612,12 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
/* Hook up the backing file link; drop our reference, bs owns the
* backing_hd reference now */
backing_hd_ctx = bdrv_get_aio_context(backing_hd);
aio_context_acquire(backing_hd_ctx);
ret = bdrv_set_backing_hd(bs, backing_hd, errp);
bdrv_unref(backing_hd);
aio_context_release(backing_hd_ctx);
if (ret < 0) {
goto free_exit;
}
@ -3654,6 +3688,7 @@ done:
*
* The BlockdevRef will be removed from the options QDict.
*
* The caller must hold the lock of the main AioContext and no other AioContext.
* @parent can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
@ -3665,6 +3700,8 @@ BdrvChild *bdrv_open_child(const char *filename,
bool allow_none, Error **errp)
{
BlockDriverState *bs;
BdrvChild *child;
AioContext *ctx;
GLOBAL_STATE_CODE();
@ -3674,13 +3711,19 @@ BdrvChild *bdrv_open_child(const char *filename,
return NULL;
}
return bdrv_attach_child(parent, bs, bdref_key, child_class, child_role,
ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
child = bdrv_attach_child(parent, bs, bdref_key, child_class, child_role,
errp);
aio_context_release(ctx);
return child;
}
/*
* Wrapper on bdrv_open_child() for most popular case: open primary child of bs.
*
* The caller must hold the lock of the main AioContext and no other AioContext.
* @parent can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
@ -3757,6 +3800,7 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs,
int64_t total_size;
QemuOpts *opts = NULL;
BlockDriverState *bs_snapshot = NULL;
AioContext *ctx = bdrv_get_aio_context(bs);
int ret;
GLOBAL_STATE_CODE();
@ -3765,7 +3809,10 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs,
instead of opening 'filename' directly */
/* Get the required size from the image */
aio_context_acquire(ctx);
total_size = bdrv_getlength(bs);
aio_context_release(ctx);
if (total_size < 0) {
error_setg_errno(errp, -total_size, "Could not get image size");
goto out;
@ -3799,7 +3846,10 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs,
goto out;
}
aio_context_acquire(ctx);
ret = bdrv_append(bs_snapshot, bs, errp);
aio_context_release(ctx);
if (ret < 0) {
bs_snapshot = NULL;
goto out;
@ -3843,6 +3893,7 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
Error *local_err = NULL;
QDict *snapshot_options = NULL;
int snapshot_flags = 0;
AioContext *ctx = qemu_get_aio_context();
assert(!child_class || !flags);
assert(!child_class == !parent);
@ -3980,9 +4031,13 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
/* Not requesting BLK_PERM_CONSISTENT_READ because we're only
* looking at the header to guess the image format. This works even
* in cases where a guest would not see a consistent state. */
file = blk_new(bdrv_get_aio_context(file_bs), 0, BLK_PERM_ALL);
ctx = bdrv_get_aio_context(file_bs);
aio_context_acquire(ctx);
file = blk_new(ctx, 0, BLK_PERM_ALL);
blk_insert_bs(file, file_bs, &local_err);
bdrv_unref(file_bs);
aio_context_release(ctx);
if (local_err) {
goto fail;
}
@ -4028,8 +4083,13 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
goto fail;
}
/* The AioContext could have changed during bdrv_open_common() */
ctx = bdrv_get_aio_context(bs);
if (file) {
aio_context_acquire(ctx);
blk_unref(file);
aio_context_release(ctx);
file = NULL;
}
@ -4087,13 +4147,16 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
* (snapshot_bs); thus, we have to drop the strong reference to bs
* (which we obtained by calling bdrv_new()). bs will not be deleted,
* though, because the overlay still has a reference to it. */
aio_context_acquire(ctx);
bdrv_unref(bs);
aio_context_release(ctx);
bs = snapshot_bs;
}
return bs;
fail:
aio_context_acquire(ctx);
blk_unref(file);
qobject_unref(snapshot_options);
qobject_unref(bs->explicit_options);
@ -4102,11 +4165,14 @@ fail:
bs->options = NULL;
bs->explicit_options = NULL;
bdrv_unref(bs);
aio_context_release(ctx);
error_propagate(errp, local_err);
return NULL;
close_and_fail:
aio_context_acquire(ctx);
bdrv_unref(bs);
aio_context_release(ctx);
qobject_unref(snapshot_options);
qobject_unref(options);
error_propagate(errp, local_err);
@ -4578,6 +4644,11 @@ int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
* backing BlockDriverState (or NULL).
*
* Return 0 on success, otherwise return < 0 and set @errp.
*
* The caller must hold the AioContext lock of @reopen_state->bs.
* @reopen_state->bs can move to a different AioContext in this function.
* Callers must make sure that their AioContext locking is still correct after
* this.
*/
static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
bool is_backing, Transaction *tran,
@ -4590,6 +4661,8 @@ static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
const char *child_name = is_backing ? "backing" : "file";
QObject *value;
const char *str;
AioContext *ctx, *old_ctx;
int ret;
GLOBAL_STATE_CODE();
@ -4654,8 +4727,22 @@ static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
reopen_state->old_file_bs = old_child_bs;
}
return bdrv_set_file_or_backing_noperm(bs, new_child_bs, is_backing,
old_ctx = bdrv_get_aio_context(bs);
ctx = bdrv_get_aio_context(new_child_bs);
if (old_ctx != ctx) {
aio_context_release(old_ctx);
aio_context_acquire(ctx);
}
ret = bdrv_set_file_or_backing_noperm(bs, new_child_bs, is_backing,
tran, errp);
if (old_ctx != ctx) {
aio_context_release(ctx);
aio_context_acquire(old_ctx);
}
return ret;
}
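
The release/acquire dance above swaps locks only when the two contexts differ, presumably to avoid holding two AioContext locks at once, a classic ordering hazard between threads. A condensed sketch of the idiom, with operation_in_new_ctx() as a hypothetical callee:

/* Trade the caller's lock for the child's only when they differ. */
if (old_ctx != ctx) {
    aio_context_release(old_ctx);
    aio_context_acquire(ctx);
}
ret = operation_in_new_ctx();
if (old_ctx != ctx) {
    aio_context_release(ctx);
    aio_context_acquire(old_ctx);
}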
/*
@ -4674,6 +4761,7 @@ static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
* It is the responsibility of the caller to then call the abort() or
* commit() for any other BDS that have been left in a prepare() state
*
* The caller must hold the AioContext lock of @reopen_state->bs.
*/
static int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
BlockReopenQueue *queue,
@ -6392,6 +6480,13 @@ int coroutine_fn bdrv_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
}
memset(bdi, 0, sizeof(*bdi));
ret = drv->bdrv_co_get_info(bs, bdi);
if (bdi->subcluster_size == 0) {
/*
* If the driver left this unset, subclusters are not supported.
* Then it is safe to treat each cluster as having only one subcluster.
*/
bdi->subcluster_size = bdi->cluster_size;
}
if (ret < 0) {
return ret;
}
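
With this defaulting in place, callers may treat bdi->subcluster_size as a valid alignment whenever bdi->cluster_size is one. A hypothetical caller, sketched under that assumption:

/* Hypothetical caller: subcluster_size falls back to cluster_size
 * above, so a non-zero value can be used directly as an alignment. */
BlockDriverInfo bdi;

if (bdrv_co_get_info(bs, &bdi) == 0 && bdi.subcluster_size > 0) {
    offset = QEMU_ALIGN_DOWN(offset, bdi.subcluster_size);
}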

View File

@ -22,16 +22,6 @@
#include "block/block-io.h"
/*
* Keep the QEMU BlockDriver names identical to the libblkio driver names.
* Using macros instead of typing out the string literals avoids typos.
*/
#define DRIVER_IO_URING "io_uring"
#define DRIVER_NVME_IO_URING "nvme-io_uring"
#define DRIVER_VIRTIO_BLK_VFIO_PCI "virtio-blk-vfio-pci"
#define DRIVER_VIRTIO_BLK_VHOST_USER "virtio-blk-vhost-user"
#define DRIVER_VIRTIO_BLK_VHOST_VDPA "virtio-blk-vhost-vdpa"
/*
* Allocated bounce buffers are kept in a list sorted by buffer address.
*/
@ -613,8 +603,8 @@ static void blkio_unregister_buf(BlockDriverState *bs, void *host, size_t size)
}
}
static int blkio_io_uring_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
static int blkio_io_uring_connect(BlockDriverState *bs, QDict *options,
int flags, Error **errp)
{
const char *filename = qdict_get_str(options, "filename");
BDRVBlkioState *s = bs->opaque;
@ -637,11 +627,18 @@ static int blkio_io_uring_open(BlockDriverState *bs, QDict *options, int flags,
}
}
ret = blkio_connect(s->blkio);
if (ret < 0) {
error_setg_errno(errp, -ret, "blkio_connect failed: %s",
blkio_get_error_msg());
return ret;
}
return 0;
}
static int blkio_nvme_io_uring(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
static int blkio_nvme_io_uring_connect(BlockDriverState *bs, QDict *options,
int flags, Error **errp)
{
const char *path = qdict_get_try_str(options, "path");
BDRVBlkioState *s = bs->opaque;
@ -665,16 +662,23 @@ static int blkio_nvme_io_uring(BlockDriverState *bs, QDict *options, int flags,
return -EINVAL;
}
ret = blkio_connect(s->blkio);
if (ret < 0) {
error_setg_errno(errp, -ret, "blkio_connect failed: %s",
blkio_get_error_msg());
return ret;
}
return 0;
}
static int blkio_virtio_blk_common_open(BlockDriverState *bs,
QDict *options, int flags, Error **errp)
static int blkio_virtio_blk_connect(BlockDriverState *bs, QDict *options,
int flags, Error **errp)
{
const char *path = qdict_get_try_str(options, "path");
BDRVBlkioState *s = bs->opaque;
bool fd_supported = false;
int fd, ret;
int fd = -1, ret;
if (!path) {
error_setg(errp, "missing 'path' option");
@ -686,7 +690,7 @@ static int blkio_virtio_blk_common_open(BlockDriverState *bs,
return -EINVAL;
}
if (blkio_get_int(s->blkio, "fd", &fd) == 0) {
if (blkio_set_int(s->blkio, "fd", -1) == 0) {
fd_supported = true;
}
@ -696,27 +700,37 @@ static int blkio_virtio_blk_common_open(BlockDriverState *bs,
* layer through the "/dev/fdset/N" special path.
*/
if (fd_supported) {
int open_flags;
if (flags & BDRV_O_RDWR) {
open_flags = O_RDWR;
} else {
open_flags = O_RDONLY;
}
fd = qemu_open(path, open_flags, errp);
/*
* `path` can contain the path of a character device
* (e.g. /dev/vhost-vdpa-0 or /dev/vfio/vfio) or a unix socket.
*
* So we should always open it with the O_RDWR flag, even if BDRV_O_RDWR
* is not set in the open flags, because otherwise the exchange of IOCTL
* commands, for example, will fail.
*
* In order to open the device read-only, we are using the `read-only`
* property of the libblkio driver in blkio_file_open().
*/
fd = qemu_open(path, O_RDWR, NULL);
if (fd < 0) {
return -EINVAL;
}
/*
* qemu_open() can fail if the user specifies a path that is not
* a file or device, for example in the case of a Unix domain socket
* for the virtio-blk-vhost-user driver. In such cases, let's have
* libblkio open the path directly.
*/
fd_supported = false;
} else {
ret = blkio_set_int(s->blkio, "fd", fd);
if (ret < 0) {
error_setg_errno(errp, -ret, "failed to set fd: %s",
blkio_get_error_msg());
fd_supported = false;
qemu_close(fd);
return ret;
fd = -1;
}
} else {
}
}
if (!fd_supported) {
ret = blkio_set_str(s->blkio, "path", path);
if (ret < 0) {
error_setg_errno(errp, -ret, "failed to set path: %s",
@ -725,6 +739,49 @@ static int blkio_virtio_blk_common_open(BlockDriverState *bs,
}
}
ret = blkio_connect(s->blkio);
if (ret < 0 && fd >= 0) {
/* Failed to give the FD to libblkio, close it */
qemu_close(fd);
fd = -1;
}
/*
* Before https://gitlab.com/libblkio/libblkio/-/merge_requests/208
* (libblkio <= v1.3.0), setting the `fd` property is not enough to check
* whether the driver supports it: blkio_connect() will simply fail with
* -EINVAL. So let's call blkio_connect() again, setting `path` directly,
* to cover this scenario.
*/
if (fd_supported && ret == -EINVAL) {
/*
* We need to clear the `fd` property we set previously by setting
* it to -1.
*/
ret = blkio_set_int(s->blkio, "fd", -1);
if (ret < 0) {
error_setg_errno(errp, -ret, "failed to set fd: %s",
blkio_get_error_msg());
return ret;
}
ret = blkio_set_str(s->blkio, "path", path);
if (ret < 0) {
error_setg_errno(errp, -ret, "failed to set path: %s",
blkio_get_error_msg());
return ret;
}
ret = blkio_connect(s->blkio);
}
if (ret < 0) {
error_setg_errno(errp, -ret, "blkio_connect failed: %s",
blkio_get_error_msg());
return ret;
}
qdict_del(options, "path");
return 0;
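
Taken together, the connect strategy above is: prefer handing libblkio an O_RDWR file descriptor via the "fd" property, and fall back to the "path" property when the fd cannot be passed or an older libblkio rejects it at connect time. A compressed sketch, with error reporting elided and connect_with_fd_fallback() as a made-up name:

/* Made-up helper; mirrors the fallback logic above. */
static int connect_with_fd_fallback(struct blkio *b, const char *path,
                                    int fd)
{
    int ret = -EINVAL;

    if (fd >= 0 && blkio_set_int(b, "fd", fd) == 0) {
        ret = blkio_connect(b);
    }
    if (ret == -EINVAL) {
        blkio_set_int(b, "fd", -1);   /* clear any fd we set */
        blkio_set_str(b, "path", path);
        ret = blkio_connect(b);
    }
    return ret;
}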
@ -744,24 +801,6 @@ static int blkio_file_open(BlockDriverState *bs, QDict *options, int flags,
return ret;
}
if (strcmp(blkio_driver, DRIVER_IO_URING) == 0) {
ret = blkio_io_uring_open(bs, options, flags, errp);
} else if (strcmp(blkio_driver, DRIVER_NVME_IO_URING) == 0) {
ret = blkio_nvme_io_uring(bs, options, flags, errp);
} else if (strcmp(blkio_driver, DRIVER_VIRTIO_BLK_VFIO_PCI) == 0) {
ret = blkio_virtio_blk_common_open(bs, options, flags, errp);
} else if (strcmp(blkio_driver, DRIVER_VIRTIO_BLK_VHOST_USER) == 0) {
ret = blkio_virtio_blk_common_open(bs, options, flags, errp);
} else if (strcmp(blkio_driver, DRIVER_VIRTIO_BLK_VHOST_VDPA) == 0) {
ret = blkio_virtio_blk_common_open(bs, options, flags, errp);
} else {
g_assert_not_reached();
}
if (ret < 0) {
blkio_destroy(&s->blkio);
return ret;
}
if (!(flags & BDRV_O_RDWR)) {
ret = blkio_set_bool(s->blkio, "read-only", true);
if (ret < 0) {
@ -772,10 +811,20 @@ static int blkio_file_open(BlockDriverState *bs, QDict *options, int flags,
}
}
ret = blkio_connect(s->blkio);
if (strcmp(blkio_driver, "io_uring") == 0) {
ret = blkio_io_uring_connect(bs, options, flags, errp);
} else if (strcmp(blkio_driver, "nvme-io_uring") == 0) {
ret = blkio_nvme_io_uring_connect(bs, options, flags, errp);
} else if (strcmp(blkio_driver, "virtio-blk-vfio-pci") == 0) {
ret = blkio_virtio_blk_connect(bs, options, flags, errp);
} else if (strcmp(blkio_driver, "virtio-blk-vhost-user") == 0) {
ret = blkio_virtio_blk_connect(bs, options, flags, errp);
} else if (strcmp(blkio_driver, "virtio-blk-vhost-vdpa") == 0) {
ret = blkio_virtio_blk_connect(bs, options, flags, errp);
} else {
g_assert_not_reached();
}
if (ret < 0) {
error_setg_errno(errp, -ret, "blkio_connect failed: %s",
blkio_get_error_msg());
blkio_destroy(&s->blkio);
return ret;
}
@ -855,6 +904,7 @@ static int blkio_file_open(BlockDriverState *bs, QDict *options, int flags,
QLIST_INIT(&s->bounce_bufs);
s->blkioq = blkio_get_queue(s->blkio, 0);
s->completion_fd = blkioq_get_completion_fd(s->blkioq);
blkioq_set_completion_fd_enabled(s->blkioq, true);
blkio_attach_aio_context(bs, bdrv_get_aio_context(bs));
return 0;
@ -1028,10 +1078,11 @@ static void blkio_refresh_limits(BlockDriverState *bs, Error **errp)
* - truncate
*/
#define BLKIO_DRIVER(name, ...) \
{ \
.format_name = name, \
.protocol_name = name, \
/*
* Do not include .format_name and .protocol_name because module_block.py
* does not parse macros in the source code.
*/
#define BLKIO_DRIVER_COMMON \
.instance_size = sizeof(BDRVBlkioState), \
.bdrv_file_open = blkio_file_open, \
.bdrv_close = blkio_close, \
@ -1047,30 +1098,43 @@ static void blkio_refresh_limits(BlockDriverState *bs, Error **errp)
.bdrv_co_pwrite_zeroes = blkio_co_pwrite_zeroes, \
.bdrv_refresh_limits = blkio_refresh_limits, \
.bdrv_register_buf = blkio_register_buf, \
.bdrv_unregister_buf = blkio_unregister_buf, \
__VA_ARGS__ \
}
.bdrv_unregister_buf = blkio_unregister_buf,
static BlockDriver bdrv_io_uring = BLKIO_DRIVER(
DRIVER_IO_URING,
/*
* Use the same .format_name and .protocol_name as the libblkio driver name for
* consistency.
*/
static BlockDriver bdrv_io_uring = {
.format_name = "io_uring",
.protocol_name = "io_uring",
.bdrv_needs_filename = true,
);
BLKIO_DRIVER_COMMON
};
static BlockDriver bdrv_nvme_io_uring = BLKIO_DRIVER(
DRIVER_NVME_IO_URING,
);
static BlockDriver bdrv_nvme_io_uring = {
.format_name = "nvme-io_uring",
.protocol_name = "nvme-io_uring",
BLKIO_DRIVER_COMMON
};
static BlockDriver bdrv_virtio_blk_vfio_pci = BLKIO_DRIVER(
DRIVER_VIRTIO_BLK_VFIO_PCI
);
static BlockDriver bdrv_virtio_blk_vfio_pci = {
.format_name = "virtio-blk-vfio-pci",
.protocol_name = "virtio-blk-vfio-pci",
BLKIO_DRIVER_COMMON
};
static BlockDriver bdrv_virtio_blk_vhost_user = BLKIO_DRIVER(
DRIVER_VIRTIO_BLK_VHOST_USER
);
static BlockDriver bdrv_virtio_blk_vhost_user = {
.format_name = "virtio-blk-vhost-user",
.protocol_name = "virtio-blk-vhost-user",
BLKIO_DRIVER_COMMON
};
static BlockDriver bdrv_virtio_blk_vhost_vdpa = BLKIO_DRIVER(
DRIVER_VIRTIO_BLK_VHOST_VDPA
);
static BlockDriver bdrv_virtio_blk_vhost_vdpa = {
.format_name = "virtio-blk-vhost-vdpa",
.protocol_name = "virtio-blk-vhost-vdpa",
BLKIO_DRIVER_COMMON
};
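
A side effect of the new spelling: adding another libblkio driver now means writing both names out by hand, which is exactly what lets module_block.py discover them. A hypothetical entry would look like:

/* Hypothetical driver entry; the two names must match the libblkio
 * driver name exactly. */
static BlockDriver bdrv_some_new_driver = {
    .format_name   = "some-new-driver",
    .protocol_name = "some-new-driver",
    BLKIO_DRIVER_COMMON
};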
static void bdrv_blkio_init(void)
{

View File

@ -203,7 +203,8 @@ static void bochs_refresh_limits(BlockDriverState *bs, Error **errp)
bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}
static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
static int64_t coroutine_fn GRAPH_RDLOCK
seek_to_sector(BlockDriverState *bs, int64_t sector_num)
{
BDRVBochsState *s = bs->opaque;
uint64_t offset = sector_num * 512;
@ -224,7 +225,7 @@ static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
(s->extent_blocks + s->bitmap_blocks));
/* read in bitmap for current extent */
ret = bdrv_pread(bs->file, bitmap_offset + (extent_offset / 8), 1,
ret = bdrv_co_pread(bs->file, bitmap_offset + (extent_offset / 8), 1,
&bitmap_entry, 0);
if (ret < 0) {
return ret;

View File

@ -212,7 +212,8 @@ static void cloop_refresh_limits(BlockDriverState *bs, Error **errp)
bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}
static inline int cloop_read_block(BlockDriverState *bs, int block_num)
static int coroutine_fn GRAPH_RDLOCK
cloop_read_block(BlockDriverState *bs, int block_num)
{
BDRVCloopState *s = bs->opaque;
@ -220,7 +221,7 @@ static inline int cloop_read_block(BlockDriverState *bs, int block_num)
int ret;
uint32_t bytes = s->offsets[block_num + 1] - s->offsets[block_num];
ret = bdrv_pread(bs->file, s->offsets[block_num], bytes,
ret = bdrv_co_pread(bs->file, s->offsets[block_num], bytes,
s->compressed_block, 0);
if (ret < 0) {
return -1;
@ -244,7 +245,7 @@ static inline int cloop_read_block(BlockDriverState *bs, int block_num)
return 0;
}
static int coroutine_fn
static int coroutine_fn GRAPH_RDLOCK
cloop_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{

View File

@ -616,7 +616,8 @@ err:
return s->n_chunks; /* error */
}
static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
static int coroutine_fn GRAPH_RDLOCK
dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
BDRVDMGState *s = bs->opaque;
@ -633,7 +634,7 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
case UDZO: { /* zlib compressed */
/* we need to buffer, because only the chunk as a whole can be
* inflated. */
ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
s->compressed_chunk, 0);
if (ret < 0) {
return -1;
@ -659,7 +660,7 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
}
/* we need to buffer, because only the chunk as a whole can be
* inflated. */
ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
s->compressed_chunk, 0);
if (ret < 0) {
return -1;
@ -680,7 +681,7 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
}
/* we need to buffer, because only the chunk as a whole can be
* inflated. */
ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
s->compressed_chunk, 0);
if (ret < 0) {
return -1;
@ -696,7 +697,7 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
}
break;
case UDRW: /* copy */
ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
s->uncompressed_chunk, 0);
if (ret < 0) {
return -1;
@ -713,7 +714,7 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
return 0;
}
static int coroutine_fn
static int coroutine_fn GRAPH_RDLOCK
dmg_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{

View File

@ -167,7 +167,7 @@ vu_blk_set_config(VuDev *vu_dev, const uint8_t *data,
uint8_t wce;
/* don't support live migration */
if (flags != VHOST_SET_CONFIG_TYPE_MASTER) {
if (flags != VHOST_SET_CONFIG_TYPE_FRONTEND) {
return -EINVAL;
}

View File

@ -193,7 +193,7 @@ static int fd_open(BlockDriverState *bs)
return -EIO;
}
static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs);
static int64_t raw_getlength(BlockDriverState *bs);
typedef struct RawPosixAIOData {
BlockDriverState *bs;
@ -1232,7 +1232,6 @@ static int hdev_get_max_hw_transfer(int fd, struct stat *st)
static int get_sysfs_str_val(struct stat *st, const char *attribute,
char **val) {
g_autofree char *sysfspath = NULL;
int ret;
size_t len;
if (!S_ISBLK(st->st_mode)) {
@ -1242,8 +1241,7 @@ static int get_sysfs_str_val(struct stat *st, const char *attribute,
sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/%s",
major(st->st_rdev), minor(st->st_rdev),
attribute);
ret = g_file_get_contents(sysfspath, val, &len, NULL);
if (ret == -1) {
if (!g_file_get_contents(sysfspath, val, &len, NULL)) {
return -ENOENT;
}
@ -1253,7 +1251,7 @@ static int get_sysfs_str_val(struct stat *st, const char *attribute,
if (*(p + len - 1) == '\n') {
*(p + len - 1) = '\0';
}
return ret;
return 0;
}
#endif
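
This fix is easy to misread: g_file_get_contents() returns a gboolean, so the old `ret == -1` comparison was never true and read failures slipped through. The corrected idiom in isolation, with a hypothetical sysfs attribute path:

/* g_file_get_contents() reports failure as FALSE, not -1. */
g_autofree char *val = NULL;
gsize len;

if (!g_file_get_contents("/sys/dev/block/8:0/queue/rotational",
                         &val, &len, NULL)) {
    return -ENOENT;   /* attribute missing or unreadable */
}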
@ -1974,7 +1972,7 @@ static int handle_aiocb_write_zeroes(void *opaque)
#ifdef CONFIG_FALLOCATE
/* Last resort: we are trying to extend the file with zeroed data. This
* can be done via fallocate(fd, 0) */
len = raw_co_getlength(aiocb->bs);
len = raw_getlength(aiocb->bs);
if (s->has_fallocate && len >= 0 && aiocb->aio_offset >= len) {
int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
if (ret == 0 || ret != -ENOTSUP) {
@ -2666,7 +2664,7 @@ static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset,
}
if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) {
int64_t cur_length = raw_co_getlength(bs);
int64_t cur_length = raw_getlength(bs);
if (offset != cur_length && exact) {
error_setg(errp, "Cannot resize device files");
@ -2684,7 +2682,7 @@ static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset,
}
#ifdef __OpenBSD__
static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
static int64_t raw_getlength(BlockDriverState *bs)
{
BDRVRawState *s = bs->opaque;
int fd = s->fd;
@ -2703,7 +2701,7 @@ static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
return st.st_size;
}
#elif defined(__NetBSD__)
static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
static int64_t raw_getlength(BlockDriverState *bs)
{
BDRVRawState *s = bs->opaque;
int fd = s->fd;
@ -2728,7 +2726,7 @@ static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
return st.st_size;
}
#elif defined(__sun__)
static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
static int64_t raw_getlength(BlockDriverState *bs)
{
BDRVRawState *s = bs->opaque;
struct dk_minfo minfo;
@ -2759,7 +2757,7 @@ static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
return size;
}
#elif defined(CONFIG_BSD)
static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
static int64_t raw_getlength(BlockDriverState *bs)
{
BDRVRawState *s = bs->opaque;
int fd = s->fd;
@ -2831,7 +2829,7 @@ again:
return size;
}
#else
static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
static int64_t raw_getlength(BlockDriverState *bs)
{
BDRVRawState *s = bs->opaque;
int ret;
@ -2850,6 +2848,11 @@ static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
}
#endif
static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
{
return raw_getlength(bs);
}
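
The rename leaves raw_getlength() as the plain implementation (selected per platform by the #ifdef chain above) and keeps a thin coroutine_fn wrapper for the block-driver interface, so non-coroutine callers like check_cache_dropped() no longer pretend to be coroutines. The same shape, for a hypothetical driver:

/* Hypothetical driver: plain implementation plus coroutine_fn wrapper. */
static int64_t mydrv_getlength(BlockDriverState *bs)
{
    return 0;   /* a real driver would query its backend here */
}

static int64_t coroutine_fn mydrv_co_getlength(BlockDriverState *bs)
{
    return mydrv_getlength(bs);
}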
static int64_t coroutine_fn raw_co_get_allocated_file_size(BlockDriverState *bs)
{
struct stat st;
@ -3215,7 +3218,7 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
* round up if necessary.
*/
if (!QEMU_IS_ALIGNED(*pnum, bs->bl.request_alignment)) {
int64_t file_length = raw_co_getlength(bs);
int64_t file_length = raw_getlength(bs);
if (file_length > 0) {
/* Ignore errors, this is just a safeguard */
assert(hole == file_length);
@ -3237,7 +3240,7 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
#if defined(__linux__)
/* Verify that the file is not in the page cache */
static void coroutine_fn check_cache_dropped(BlockDriverState *bs, Error **errp)
static void check_cache_dropped(BlockDriverState *bs, Error **errp)
{
const size_t window_size = 128 * 1024 * 1024;
BDRVRawState *s = bs->opaque;
@ -3252,7 +3255,7 @@ static void coroutine_fn check_cache_dropped(BlockDriverState *bs, Error **errp)
page_size = sysconf(_SC_PAGESIZE);
vec = g_malloc(DIV_ROUND_UP(window_size, page_size));
end = raw_co_getlength(bs);
end = raw_getlength(bs);
for (offset = 0; offset < end; offset += window_size) {
void *new_window;
@ -4468,7 +4471,7 @@ static int cdrom_reopen(BlockDriverState *bs)
static bool coroutine_fn cdrom_co_is_inserted(BlockDriverState *bs)
{
return raw_co_getlength(bs) > 0;
return raw_getlength(bs) > 0;
}
static void coroutine_fn cdrom_co_eject(BlockDriverState *bs, bool eject_flag)

View File

@ -30,10 +30,8 @@ BdrvGraphLock graph_lock;
/* Protects the list of AioContexts and orphaned_reader_count */
static QemuMutex aio_context_list_lock;
#if 0
/* Written and read with atomic operations. */
static int has_writer;
#endif
/*
* A reader coroutine could move from one AioContext to another.
@ -90,7 +88,6 @@ void unregister_aiocontext(AioContext *ctx)
g_free(ctx->bdrv_graph);
}
#if 0
static uint32_t reader_count(void)
{
BdrvGraphRWlock *brdv_graph;
@ -108,19 +105,27 @@ static uint32_t reader_count(void)
assert((int32_t)rd >= 0);
return rd;
}
#endif
void bdrv_graph_wrlock(void)
void bdrv_graph_wrlock(BlockDriverState *bs)
{
AioContext *ctx = NULL;
GLOBAL_STATE_CODE();
/*
* TODO Some callers hold an AioContext lock when this is called, which
* causes deadlocks. Reenable once the AioContext locking is cleaned up (or
* AioContext locks are gone).
*/
#if 0
assert(!qatomic_read(&has_writer));
/*
* Release only a non-mainloop AioContext. The main loop often relies on
* the BQL and doesn't lock the main AioContext before doing things.
*/
if (bs) {
ctx = bdrv_get_aio_context(bs);
if (ctx != qemu_get_aio_context()) {
aio_context_release(ctx);
} else {
ctx = NULL;
}
}
/* Make sure that constantly arriving new I/O doesn't cause starvation */
bdrv_drain_all_begin_nopoll();
@ -149,13 +154,15 @@ void bdrv_graph_wrlock(void)
} while (reader_count() >= 1);
bdrv_drain_all_end();
#endif
if (ctx) {
aio_context_acquire(bdrv_get_aio_context(bs));
}
}
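
Callers of bdrv_graph_wrlock() now pass the BDS whose AioContext lock they hold (or NULL if they hold only the main context), so the function can drop that lock while draining and polling for readers. Sketched from the caller's side, assuming bs stays in the same context across the critical section:

/* The write lock may release and re-acquire bs's AioContext. */
aio_context_acquire(bdrv_get_aio_context(bs));
bdrv_graph_wrlock(bs);
/* ... modify the graph ... */
bdrv_graph_wrunlock();
aio_context_release(bdrv_get_aio_context(bs));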
void bdrv_graph_wrunlock(void)
{
GLOBAL_STATE_CODE();
#if 0
QEMU_LOCK_GUARD(&aio_context_list_lock);
assert(qatomic_read(&has_writer));
@ -167,13 +174,10 @@ void bdrv_graph_wrunlock(void)
/* Wake up all coroutines that are waiting to read the graph */
qemu_co_enter_all(&reader_queue, &aio_context_list_lock);
#endif
}
void coroutine_fn bdrv_graph_co_rdlock(void)
{
/* TODO Reenable when wrlock is reenabled */
#if 0
BdrvGraphRWlock *bdrv_graph;
bdrv_graph = qemu_get_current_aio_context()->bdrv_graph;
@ -233,12 +237,10 @@ void coroutine_fn bdrv_graph_co_rdlock(void)
qemu_co_queue_wait(&reader_queue, &aio_context_list_lock);
}
}
#endif
}
void coroutine_fn bdrv_graph_co_rdunlock(void)
{
#if 0
BdrvGraphRWlock *bdrv_graph;
bdrv_graph = qemu_get_current_aio_context()->bdrv_graph;
@ -256,7 +258,6 @@ void coroutine_fn bdrv_graph_co_rdunlock(void)
if (qatomic_read(&has_writer)) {
aio_wait_kick();
}
#endif
}
void bdrv_graph_rdlock_main_loop(void)
@ -274,19 +275,13 @@ void bdrv_graph_rdunlock_main_loop(void)
void assert_bdrv_graph_readable(void)
{
/* reader_count() is slow due to contention on aio_context_list_lock */
/* TODO Reenable when wrlock is reenabled */
#if 0
#ifdef CONFIG_DEBUG_GRAPH_LOCK
assert(qemu_in_main_thread() || reader_count());
#endif
#endif
}
void assert_bdrv_graph_writable(void)
{
assert(qemu_in_main_thread());
/* TODO Reenable when wrlock is reenabled */
#if 0
assert(qatomic_read(&has_writer));
#endif
}

View File

@ -728,21 +728,21 @@ BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
}
/**
* Round a region to cluster boundaries
* Round a region to subcluster (if supported) or cluster boundaries
*/
void coroutine_fn GRAPH_RDLOCK
bdrv_round_to_clusters(BlockDriverState *bs, int64_t offset, int64_t bytes,
int64_t *cluster_offset, int64_t *cluster_bytes)
bdrv_round_to_subclusters(BlockDriverState *bs, int64_t offset, int64_t bytes,
int64_t *align_offset, int64_t *align_bytes)
{
BlockDriverInfo bdi;
IO_CODE();
if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
*cluster_offset = offset;
*cluster_bytes = bytes;
if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.subcluster_size == 0) {
*align_offset = offset;
*align_bytes = bytes;
} else {
int64_t c = bdi.cluster_size;
*cluster_offset = QEMU_ALIGN_DOWN(offset, c);
*cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
int64_t c = bdi.subcluster_size;
*align_offset = QEMU_ALIGN_DOWN(offset, c);
*align_bytes = QEMU_ALIGN_UP(offset - *align_offset + bytes, c);
}
}
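
A worked example makes the rounding concrete (numbers hypothetical):

/*
 * With subcluster_size = 16 KiB, offset = 70 KiB, bytes = 4 KiB:
 *   align_offset = QEMU_ALIGN_DOWN(70 KiB, 16 KiB)                = 64 KiB
 *   align_bytes  = QEMU_ALIGN_UP(70 KiB - 64 KiB + 4 KiB, 16 KiB) = 16 KiB
 * so the request [70 KiB, 74 KiB) expands to the single subcluster
 * [64 KiB, 80 KiB) that fully contains it.
 */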
@ -1168,8 +1168,8 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
void *bounce_buffer = NULL;
BlockDriver *drv = bs->drv;
int64_t cluster_offset;
int64_t cluster_bytes;
int64_t align_offset;
int64_t align_bytes;
int64_t skip_bytes;
int ret;
int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
@ -1203,28 +1203,28 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
* BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
* is one reason we loop rather than doing it all at once.
*/
bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
skip_bytes = offset - cluster_offset;
bdrv_round_to_subclusters(bs, offset, bytes, &align_offset, &align_bytes);
skip_bytes = offset - align_offset;
trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
cluster_offset, cluster_bytes);
align_offset, align_bytes);
while (cluster_bytes) {
while (align_bytes) {
int64_t pnum;
if (skip_write) {
ret = 1; /* "already allocated", so nothing will be copied */
pnum = MIN(cluster_bytes, max_transfer);
pnum = MIN(align_bytes, max_transfer);
} else {
ret = bdrv_is_allocated(bs, cluster_offset,
MIN(cluster_bytes, max_transfer), &pnum);
ret = bdrv_is_allocated(bs, align_offset,
MIN(align_bytes, max_transfer), &pnum);
if (ret < 0) {
/*
* Safe to treat errors in querying allocation as if
* unallocated; we'll probably fail again soon on the
* read, but at least that will set a decent errno.
*/
pnum = MIN(cluster_bytes, max_transfer);
pnum = MIN(align_bytes, max_transfer);
}
/* Stop at EOF if the image ends in the middle of the cluster */
@ -1242,7 +1242,7 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
/* Must copy-on-read; use the bounce buffer */
pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
if (!bounce_buffer) {
int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
int64_t max_we_need = MAX(pnum, align_bytes - pnum);
int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);
@ -1254,7 +1254,7 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
}
qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);
ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
ret = bdrv_driver_preadv(bs, align_offset, pnum,
&local_qiov, 0, 0);
if (ret < 0) {
goto err;
@ -1266,13 +1266,13 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
/* FIXME: Should we (perhaps conditionally) be setting
* BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
* that still correctly reads as zero? */
ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
ret = bdrv_co_do_pwrite_zeroes(bs, align_offset, pnum,
BDRV_REQ_WRITE_UNCHANGED);
} else {
/* This does not change the data on the disk, so it is not
* necessary to flush even in cache=writethrough mode.
*/
ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
ret = bdrv_driver_pwritev(bs, align_offset, pnum,
&local_qiov, 0,
BDRV_REQ_WRITE_UNCHANGED);
}
@ -1301,8 +1301,8 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
}
}
cluster_offset += pnum;
cluster_bytes -= pnum;
align_offset += pnum;
align_bytes -= pnum;
progress += pnum - skip_bytes;
skip_bytes = 0;
}
@ -1379,7 +1379,7 @@ bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
}
/* Forward the request to the BlockDriver, possibly fragmenting it */
total_bytes = bdrv_getlength(bs);
total_bytes = bdrv_co_getlength(bs);
if (total_bytes < 0) {
ret = total_bytes;
goto out;
@ -1710,7 +1710,11 @@ static int bdrv_pad_request(BlockDriverState *bs,
int sliced_niov;
size_t sliced_head, sliced_tail;
bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort);
/* Should have been checked by the caller already */
ret = bdrv_check_request32(*offset, *bytes, *qiov, *qiov_offset);
if (ret < 0) {
return ret;
}
if (!bdrv_init_padding(bs, *offset, *bytes, write, pad)) {
if (padded) {
@ -1723,7 +1727,7 @@ static int bdrv_pad_request(BlockDriverState *bs,
&sliced_head, &sliced_tail,
&sliced_niov);
/* Guaranteed by bdrv_check_qiov_request() */
/* Guaranteed by bdrv_check_request32() */
assert(*bytes <= SIZE_MAX);
ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov,
sliced_head, *bytes);
@ -2388,7 +2392,7 @@ bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
assert(pnum);
assert_bdrv_graph_readable();
*pnum = 0;
total_size = bdrv_getlength(bs);
total_size = bdrv_co_getlength(bs);
if (total_size < 0) {
ret = total_size;
goto early_out;
@ -2408,7 +2412,7 @@ bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
bytes = n;
}
/* Must be non-NULL or bdrv_getlength() would have failed */
/* Must be non-NULL or bdrv_co_getlength() would have failed */
assert(bs->drv);
has_filtered_child = bdrv_filter_child(bs);
if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
@ -2546,7 +2550,7 @@ bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
if (!cow_bs) {
ret |= BDRV_BLOCK_ZERO;
} else if (want_zero) {
int64_t size2 = bdrv_getlength(cow_bs);
int64_t size2 = bdrv_co_getlength(cow_bs);
if (size2 >= 0 && offset >= size2) {
ret |= BDRV_BLOCK_ZERO;
@ -3011,7 +3015,7 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
}
/* Write back cached data to the OS even with cache=unsafe */
BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
if (bs->drv->bdrv_co_flush_to_os) {
ret = bs->drv->bdrv_co_flush_to_os(bs);
if (ret < 0) {
@ -3029,7 +3033,7 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
goto flush_children;
}
BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
if (!bs->drv) {
/* bs->drv->bdrv_co_flush() might have ejected the BDS
* (even in case of apparent success) */
@ -3592,7 +3596,7 @@ int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
return ret;
}
old_size = bdrv_getlength(bs);
old_size = bdrv_co_getlength(bs);
if (old_size < 0) {
error_setg_errno(errp, -old_size, "Failed to get old image size");
return old_size;

View File

@ -283,7 +283,7 @@ static int coroutine_fn mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
s->cow_bitmap);
if (need_cow) {
bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
bdrv_round_to_subclusters(blk_bs(s->target), *offset, *bytes,
&align_offset, &align_bytes);
}
@ -576,7 +576,7 @@ static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
int64_t target_offset;
int64_t target_bytes;
WITH_GRAPH_RDLOCK_GUARD() {
bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
bdrv_round_to_subclusters(blk_bs(s->target), offset, io_bytes,
&target_offset, &target_bytes);
}
if (target_offset == offset &&

View File

@ -2,7 +2,7 @@
* QEMU Block driver for NBD
*
* Copyright (c) 2019 Virtuozzo International GmbH.
* Copyright (C) 2016 Red Hat, Inc.
* Copyright Red Hat
* Copyright (C) 2008 Bull S.A.S.
* Author: Laurent Vivier <Laurent.Vivier@bull.net>
*
@ -50,8 +50,8 @@
#define EN_OPTSTR ":exportname="
#define MAX_NBD_REQUESTS 16
#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
#define INDEX_TO_HANDLE(bs, index) ((index) ^ (uint64_t)(intptr_t)(bs))
#define COOKIE_TO_INDEX(cookie) ((cookie) - 1)
#define INDEX_TO_COOKIE(index) ((index) + 1)
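
The +1/-1 bias is deliberate: slot 0 maps to cookie 1, which keeps the value 0 free to mean "no reply pending" (see the s->reply.cookie == 0 checks below):

/* INDEX_TO_COOKIE(0) == 1 and COOKIE_TO_INDEX(1) == 0, so a cookie of
 * 0 never refers to a request slot and can serve as a sentinel. */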
typedef struct {
Coroutine *coroutine;
@ -417,25 +417,25 @@ static void coroutine_fn GRAPH_RDLOCK nbd_reconnect_attempt(BDRVNBDState *s)
reconnect_delay_timer_del(s);
}
static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t cookie)
{
int ret;
uint64_t ind = HANDLE_TO_INDEX(s, handle), ind2;
uint64_t ind = COOKIE_TO_INDEX(cookie), ind2;
QEMU_LOCK_GUARD(&s->receive_mutex);
while (true) {
if (s->reply.handle == handle) {
if (s->reply.cookie == cookie) {
/* We are done */
return 0;
}
if (s->reply.handle != 0) {
if (s->reply.cookie != 0) {
/*
* Some other request is being handled now. It should already be
* woken by whoever set s->reply.handle (or never wait in this
* woken by whoever set s->reply.cookie (or never wait in this
* yield). So, we should not wake it here.
*/
ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
ind2 = COOKIE_TO_INDEX(s->reply.cookie);
assert(!s->requests[ind2].receiving);
s->requests[ind].receiving = true;
@ -445,9 +445,9 @@ static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
/*
* We may be woken for 2 reasons:
* 1. From this function, executing in a parallel coroutine, when our
* handle is received.
* cookie is received.
* 2. From nbd_co_receive_one_chunk(), when the previous request is
* finished and s->reply.handle set to 0.
* finished and s->reply.cookie set to 0.
* Anyway, it's OK to lock the mutex and go to the next iteration.
*/
@ -456,8 +456,8 @@ static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
continue;
}
/* We are under mutex and handle is 0. We have to do the dirty work. */
assert(s->reply.handle == 0);
/* We are under mutex and cookie is 0. We have to do the dirty work. */
assert(s->reply.cookie == 0);
ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, NULL);
if (ret <= 0) {
ret = ret ? ret : -EIO;
@ -468,12 +468,12 @@ static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
nbd_channel_error(s, -EINVAL);
return -EINVAL;
}
ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
ind2 = COOKIE_TO_INDEX(s->reply.cookie);
if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].coroutine) {
nbd_channel_error(s, -EINVAL);
return -EINVAL;
}
if (s->reply.handle == handle) {
if (s->reply.cookie == cookie) {
/* We are done */
return 0;
}
@ -519,7 +519,7 @@ nbd_co_send_request(BlockDriverState *bs, NBDRequest *request,
qemu_mutex_unlock(&s->requests_lock);
qemu_co_mutex_lock(&s->send_mutex);
request->handle = INDEX_TO_HANDLE(s, i);
request->cookie = INDEX_TO_COOKIE(i);
assert(s->ioc);
@ -828,11 +828,11 @@ static coroutine_fn int nbd_co_receive_structured_payload(
* corresponding to the server's error reply), and errp is unchanged.
*/
static coroutine_fn int nbd_co_do_receive_one_chunk(
BDRVNBDState *s, uint64_t handle, bool only_structured,
BDRVNBDState *s, uint64_t cookie, bool only_structured,
int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
int ret;
int i = HANDLE_TO_INDEX(s, handle);
int i = COOKIE_TO_INDEX(cookie);
void *local_payload = NULL;
NBDStructuredReplyChunk *chunk;
@ -841,14 +841,14 @@ static coroutine_fn int nbd_co_do_receive_one_chunk(
}
*request_ret = 0;
ret = nbd_receive_replies(s, handle);
ret = nbd_receive_replies(s, cookie);
if (ret < 0) {
error_setg(errp, "Connection closed");
return -EIO;
}
assert(s->ioc);
assert(s->reply.handle == handle);
assert(s->reply.cookie == cookie);
if (nbd_reply_is_simple(&s->reply)) {
if (only_structured) {
@ -918,11 +918,11 @@ static coroutine_fn int nbd_co_do_receive_one_chunk(
* Return value is a fatal error code or normal nbd reply error code
*/
static coroutine_fn int nbd_co_receive_one_chunk(
BDRVNBDState *s, uint64_t handle, bool only_structured,
BDRVNBDState *s, uint64_t cookie, bool only_structured,
int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
Error **errp)
{
int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
int ret = nbd_co_do_receive_one_chunk(s, cookie, only_structured,
request_ret, qiov, payload, errp);
if (ret < 0) {
@ -932,7 +932,7 @@ static coroutine_fn int nbd_co_receive_one_chunk(
/* For assert at loop start in nbd_connection_entry */
*reply = s->reply;
}
s->reply.handle = 0;
s->reply.cookie = 0;
nbd_recv_coroutines_wake(s);
@ -975,10 +975,10 @@ static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
* NBD_FOREACH_REPLY_CHUNK
* The pointer stored in @payload requires g_free() to free it.
*/
#define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
#define NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, structured, \
qiov, reply, payload) \
for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)
nbd_reply_chunk_iter_receive(s, &iter, cookie, qiov, reply, payload);)
/*
* nbd_reply_chunk_iter_receive
@ -986,7 +986,7 @@ static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
*/
static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s,
NBDReplyChunkIter *iter,
uint64_t handle,
uint64_t cookie,
QEMUIOVector *qiov,
NBDReply *reply,
void **payload)
@ -1005,7 +1005,7 @@ static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s,
reply = &local_reply;
}
ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
ret = nbd_co_receive_one_chunk(s, cookie, iter->only_structured,
&request_ret, qiov, reply, payload,
&local_err);
if (ret < 0) {
@ -1038,7 +1038,7 @@ static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s,
break_loop:
qemu_mutex_lock(&s->requests_lock);
s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;
s->requests[COOKIE_TO_INDEX(cookie)].coroutine = NULL;
s->in_flight--;
qemu_co_queue_next(&s->free_sema);
qemu_mutex_unlock(&s->requests_lock);
@ -1046,12 +1046,13 @@ break_loop:
return false;
}
static int coroutine_fn nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle,
static int coroutine_fn
nbd_co_receive_return_code(BDRVNBDState *s, uint64_t cookie,
int *request_ret, Error **errp)
{
NBDReplyChunkIter iter;
NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, NULL, NULL) {
/* nbd_reply_chunk_iter_receive does all the work */
}
@ -1060,7 +1061,8 @@ static int coroutine_fn nbd_co_receive_return_code(BDRVNBDState *s, uint64_t han
return iter.ret;
}
static int coroutine_fn nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle,
static int coroutine_fn
nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t cookie,
uint64_t offset, QEMUIOVector *qiov,
int *request_ret, Error **errp)
{
@ -1069,7 +1071,7 @@ static int coroutine_fn nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t h
void *payload = NULL;
Error *local_err = NULL;
NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, s->info.structured_reply,
qiov, &reply, &payload)
{
int ret;
@ -1112,9 +1114,9 @@ static int coroutine_fn nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t h
return iter.ret;
}
static int coroutine_fn nbd_co_receive_blockstatus_reply(BDRVNBDState *s,
uint64_t handle, uint64_t length,
NBDExtent *extent,
static int coroutine_fn
nbd_co_receive_blockstatus_reply(BDRVNBDState *s, uint64_t cookie,
uint64_t length, NBDExtent *extent,
int *request_ret, Error **errp)
{
NBDReplyChunkIter iter;
@ -1124,7 +1126,7 @@ static int coroutine_fn nbd_co_receive_blockstatus_reply(BDRVNBDState *s,
bool received = false;
assert(!extent->length);
NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, &reply, &payload) {
int ret;
NBDStructuredReplyChunk *chunk = &reply.structured;
@ -1194,11 +1196,11 @@ nbd_co_request(BlockDriverState *bs, NBDRequest *request,
continue;
}
ret = nbd_co_receive_return_code(s, request->handle,
ret = nbd_co_receive_return_code(s, request->cookie,
&request_ret, &local_err);
if (local_err) {
trace_nbd_co_request_fail(request->from, request->len,
request->handle, request->flags,
request->cookie, request->flags,
request->type,
nbd_cmd_lookup(request->type),
ret, error_get_pretty(local_err));
@ -1253,10 +1255,10 @@ nbd_client_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
continue;
}
ret = nbd_co_receive_cmdread_reply(s, request.handle, offset, qiov,
ret = nbd_co_receive_cmdread_reply(s, request.cookie, offset, qiov,
&request_ret, &local_err);
if (local_err) {
trace_nbd_co_request_fail(request.from, request.len, request.handle,
trace_nbd_co_request_fail(request.from, request.len, request.cookie,
request.flags, request.type,
nbd_cmd_lookup(request.type),
ret, error_get_pretty(local_err));
@ -1411,11 +1413,11 @@ static int coroutine_fn GRAPH_RDLOCK nbd_client_co_block_status(
continue;
}
ret = nbd_co_receive_blockstatus_reply(s, request.handle, bytes,
ret = nbd_co_receive_blockstatus_reply(s, request.cookie, bytes,
&extent, &request_ret,
&local_err);
if (local_err) {
trace_nbd_co_request_fail(request.from, request.len, request.handle,
trace_nbd_co_request_fail(request.from, request.len, request.cookie,
request.flags, request.type,
nbd_cmd_lookup(request.type),
ret, error_get_pretty(local_err));

View File

@ -501,8 +501,9 @@ static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
q->need_kick++;
blk_io_plug_call(nvme_unplug_fn, q);
qemu_mutex_unlock(&q->lock);
blk_io_plug_call(nvme_unplug_fn, q);
}
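
The reorder matters because blk_io_plug_call() may invoke the callback immediately when no plug section is active, and nvme_unplug_fn() presumably takes q->lock itself; with the old order that could recurse on a non-recursive QemuMutex. The hazard, sketched as a comment:

/* Old order (assuming nvme_unplug_fn() locks q->lock):
 *
 *   qemu_mutex_lock(&q->lock);
 *   blk_io_plug_call(nvme_unplug_fn, q);   // may run callback now,
 *                                          // which locks q->lock...
 *   qemu_mutex_unlock(&q->lock);           // ...never reached: deadlock
 */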
static void nvme_admin_cmd_sync_cb(void *opaque, int ret)

View File

@ -200,7 +200,7 @@ allocate_clusters(BlockDriverState *bs, int64_t sector_num,
assert(idx < s->bat_size && idx + to_allocate <= s->bat_size);
space = to_allocate * s->tracks;
len = bdrv_getlength(bs->file->bs);
len = bdrv_co_getlength(bs->file->bs);
if (len < 0) {
return len;
}
@ -448,7 +448,7 @@ parallels_check_outside_image(BlockDriverState *bs, BdrvCheckResult *res,
uint32_t i;
int64_t off, high_off, size;
size = bdrv_getlength(bs->file->bs);
size = bdrv_co_getlength(bs->file->bs);
if (size < 0) {
res->check_errors++;
return size;

View File

@ -370,7 +370,7 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate,
if (!allocate)
return 0;
/* allocate a new l2 entry */
l2_offset = bdrv_getlength(bs->file->bs);
l2_offset = bdrv_co_getlength(bs->file->bs);
if (l2_offset < 0) {
return l2_offset;
}
@ -379,7 +379,7 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate,
/* update the L1 entry */
s->l1_table[l1_index] = l2_offset;
tmp = cpu_to_be64(l2_offset);
BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
BLKDBG_CO_EVENT(bs->file, BLKDBG_L1_UPDATE);
ret = bdrv_co_pwrite_sync(bs->file,
s->l1_table_offset + l1_index * sizeof(tmp),
sizeof(tmp), &tmp, 0);
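
These BLKDBG hunks all follow one mechanical rule: functions that are (or have become) coroutine_fn emit their blkdebug events through BLKDBG_CO_EVENT() rather than BLKDBG_EVENT(). In sketch form:

/* coroutine_fn code uses the _CO_ event variant. */
static int coroutine_fn example_co_read(BlockDriverState *bs, void *buf)
{
    BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
    return bdrv_co_pread(bs->file, 0, BDRV_SECTOR_SIZE, buf, 0);
}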
@ -410,7 +410,7 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate,
}
}
l2_table = s->l2_cache + (min_index << s->l2_bits);
BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
BLKDBG_CO_EVENT(bs->file, BLKDBG_L2_LOAD);
if (new_l2_table) {
memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
ret = bdrv_co_pwrite_sync(bs->file, l2_offset,
@ -434,7 +434,7 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate,
((cluster_offset & QCOW_OFLAG_COMPRESSED) && allocate == 1)) {
if (!allocate)
return 0;
BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
BLKDBG_CO_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
assert(QEMU_IS_ALIGNED(n_start | n_end, BDRV_SECTOR_SIZE));
/* allocate a new cluster */
if ((cluster_offset & QCOW_OFLAG_COMPRESSED) &&
@ -445,20 +445,20 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate,
if (decompress_cluster(bs, cluster_offset) < 0) {
return -EIO;
}
cluster_offset = bdrv_getlength(bs->file->bs);
cluster_offset = bdrv_co_getlength(bs->file->bs);
if ((int64_t) cluster_offset < 0) {
return cluster_offset;
}
cluster_offset = QEMU_ALIGN_UP(cluster_offset, s->cluster_size);
/* write the cluster content */
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO);
ret = bdrv_co_pwrite(bs->file, cluster_offset, s->cluster_size,
s->cluster_cache, 0);
if (ret < 0) {
return ret;
}
} else {
cluster_offset = bdrv_getlength(bs->file->bs);
cluster_offset = bdrv_co_getlength(bs->file->bs);
if ((int64_t) cluster_offset < 0) {
return cluster_offset;
}
@ -491,7 +491,7 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate,
NULL) < 0) {
return -EIO;
}
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO);
ret = bdrv_co_pwrite(bs->file, cluster_offset + i,
BDRV_SECTOR_SIZE,
s->cluster_data, 0);
@ -510,9 +510,9 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate,
tmp = cpu_to_be64(cluster_offset);
l2_table[l2_index] = tmp;
if (allocate == 2) {
BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
BLKDBG_CO_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
} else {
BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
BLKDBG_CO_EVENT(bs->file, BLKDBG_L2_UPDATE);
}
ret = bdrv_co_pwrite_sync(bs->file, l2_offset + l2_index * sizeof(tmp),
sizeof(tmp), &tmp, 0);
@ -595,7 +595,7 @@ decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
if (s->cluster_cache_offset != coffset) {
csize = cluster_offset >> (63 - s->cluster_bits);
csize &= (s->cluster_size - 1);
BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
ret = bdrv_co_pread(bs->file, coffset, csize, s->cluster_data, 0);
if (ret < 0)
return -1;
@ -657,7 +657,7 @@ qcow_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
/* read from the base image */
qemu_co_mutex_unlock(&s->lock);
/* qcow2 emits this on bs->file instead of bs->backing */
BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
ret = bdrv_co_pread(bs->backing, offset, n, buf, 0);
qemu_co_mutex_lock(&s->lock);
if (ret < 0) {
@ -680,7 +680,7 @@ qcow_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
break;
}
qemu_co_mutex_unlock(&s->lock);
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
ret = bdrv_co_pread(bs->file, cluster_offset + offset_in_cluster,
n, buf, 0);
qemu_co_mutex_lock(&s->lock);
@ -765,7 +765,7 @@ qcow_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
}
qemu_co_mutex_unlock(&s->lock);
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO);
ret = bdrv_co_pwrite(bs->file, cluster_offset + offset_in_cluster,
n, buf, 0);
qemu_co_mutex_lock(&s->lock);
@ -1114,7 +1114,7 @@ qcow_co_pwritev_compressed(BlockDriverState *bs, int64_t offset, int64_t bytes,
}
cluster_offset &= s->cluster_offset_mask;
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
ret = bdrv_co_pwrite(bs->file, cluster_offset, out_len, out_buf, 0);
if (ret < 0) {
goto fail;

View File

@ -283,10 +283,9 @@ static int free_bitmap_clusters(BlockDriverState *bs, Qcow2BitmapTable *tb)
/* load_bitmap_data
* @bitmap_table entries must satisfy specification constraints.
* @bitmap must be cleared */
static int load_bitmap_data(BlockDriverState *bs,
const uint64_t *bitmap_table,
uint32_t bitmap_table_size,
BdrvDirtyBitmap *bitmap)
static int coroutine_fn GRAPH_RDLOCK
load_bitmap_data(BlockDriverState *bs, const uint64_t *bitmap_table,
uint32_t bitmap_table_size, BdrvDirtyBitmap *bitmap)
{
int ret = 0;
BDRVQcow2State *s = bs->opaque;
@ -319,7 +318,7 @@ static int load_bitmap_data(BlockDriverState *bs,
* already cleared */
}
} else {
ret = bdrv_pread(bs->file, data_offset, s->cluster_size, buf, 0);
ret = bdrv_co_pread(bs->file, data_offset, s->cluster_size, buf, 0);
if (ret < 0) {
goto finish;
}
@ -337,7 +336,8 @@ finish:
return ret;
}
static BdrvDirtyBitmap *load_bitmap(BlockDriverState *bs,
static coroutine_fn GRAPH_RDLOCK
BdrvDirtyBitmap *load_bitmap(BlockDriverState *bs,
Qcow2Bitmap *bm, Error **errp)
{
int ret;
@ -649,7 +649,8 @@ fail:
return NULL;
}
int qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
int coroutine_fn
qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size)
{
@ -957,7 +958,8 @@ static void set_readonly_helper(gpointer bitmap, gpointer value)
* If header_updated is not NULL then it is set appropriately regardless of
* the return value.
*/
bool coroutine_fn qcow2_load_dirty_bitmaps(BlockDriverState *bs,
bool coroutine_fn GRAPH_RDLOCK
qcow2_load_dirty_bitmaps(BlockDriverState *bs,
bool *header_updated, Error **errp)
{
BDRVQcow2State *s = bs->opaque;

View File

@ -48,7 +48,7 @@ int coroutine_fn qcow2_shrink_l1_table(BlockDriverState *bs,
fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif
BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
BLKDBG_CO_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
ret = bdrv_co_pwrite_zeroes(bs->file,
s->l1_table_offset + new_l1_size * L1E_SIZE,
(s->l1_size - new_l1_size) * L1E_SIZE, 0);
@ -61,7 +61,7 @@ int coroutine_fn qcow2_shrink_l1_table(BlockDriverState *bs,
goto fail;
}
BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
BLKDBG_CO_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
continue;
@ -501,7 +501,7 @@ do_perform_cow_read(BlockDriverState *bs, uint64_t src_cluster_offset,
return 0;
}
BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);
BLKDBG_CO_EVENT(bs->file, BLKDBG_COW_READ);
if (!bs->drv) {
return -ENOMEDIUM;
@ -551,7 +551,7 @@ do_perform_cow_write(BlockDriverState *bs, uint64_t cluster_offset,
return ret;
}
BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
BLKDBG_CO_EVENT(bs->file, BLKDBG_COW_WRITE);
ret = bdrv_co_pwritev(s->data_file, cluster_offset + offset_in_cluster,
qiov->size, qiov, 0);
if (ret < 0) {
@ -823,10 +823,9 @@ static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
*
* Return 0 on success and -errno in error cases
*/
int coroutine_fn qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
uint64_t offset,
int compressed_size,
uint64_t *host_offset)
int coroutine_fn GRAPH_RDLOCK
qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, uint64_t offset,
int compressed_size, uint64_t *host_offset)
{
BDRVQcow2State *s = bs->opaque;
int l2_index, ret;
@ -872,7 +871,7 @@ int coroutine_fn qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
/* compressed clusters never have the copied flag */
BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
BLKDBG_CO_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
set_l2_entry(s, l2_slice, l2_index, cluster_offset);
if (has_subclusters(s)) {
@ -992,7 +991,7 @@ perform_cow(BlockDriverState *bs, QCowL2Meta *m)
/* NOTE: we have a write_aio blkdebug event here followed by
* a cow_write one in do_perform_cow_write(), but there's only
* one single I/O operation */
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO);
ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
} else {
/* If there's no guest data then write both COW regions separately */
@ -2038,7 +2037,8 @@ fail:
* all clusters in the same L2 slice) and returns the number of zeroed
* clusters.
*/
static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
static int coroutine_fn
zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
uint64_t nb_clusters, int flags)
{
BDRVQcow2State *s = bs->opaque;

View File

@ -118,7 +118,7 @@ int coroutine_fn qcow2_refcount_init(BlockDriverState *bs)
ret = -ENOMEM;
goto fail;
}
BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
BLKDBG_CO_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
ret = bdrv_co_pread(bs->file, s->refcount_table_offset,
refcount_table_size2, s->refcount_table, 0);
if (ret < 0) {
@ -1069,14 +1069,14 @@ int64_t coroutine_fn qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offs
/* only used to allocate compressed sectors. We try to allocate
contiguous sectors. size must be <= cluster_size */
int64_t coroutine_fn qcow2_alloc_bytes(BlockDriverState *bs, int size)
int64_t coroutine_fn GRAPH_RDLOCK qcow2_alloc_bytes(BlockDriverState *bs, int size)
{
BDRVQcow2State *s = bs->opaque;
int64_t offset;
size_t free_in_cluster;
int ret;
BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES);
BLKDBG_CO_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES);
assert(size > 0 && size <= s->cluster_size);
assert(!s->free_byte_offset || offset_into_cluster(s, s->free_byte_offset));
@ -1524,7 +1524,8 @@ static int realloc_refcount_array(BDRVQcow2State *s, void **array,
*
* Modifies the number of errors in res.
*/
int qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
int coroutine_fn GRAPH_RDLOCK
qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size,
int64_t offset, int64_t size)
@ -1538,7 +1539,7 @@ int qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
return 0;
}
file_len = bdrv_getlength(bs->file->bs);
file_len = bdrv_co_getlength(bs->file->bs);
if (file_len < 0) {
return file_len;
}
@ -1600,9 +1601,10 @@ enum {
*
* On failure in-memory @l2_table may be modified.
*/
static int fix_l2_entry_by_zero(BlockDriverState *bs, BdrvCheckResult *res,
uint64_t l2_offset,
uint64_t *l2_table, int l2_index, bool active,
static int coroutine_fn GRAPH_RDLOCK
fix_l2_entry_by_zero(BlockDriverState *bs, BdrvCheckResult *res,
uint64_t l2_offset, uint64_t *l2_table,
int l2_index, bool active,
bool *metadata_overlap)
{
BDRVQcow2State *s = bs->opaque;
@ -1634,7 +1636,7 @@ static int fix_l2_entry_by_zero(BlockDriverState *bs, BdrvCheckResult *res,
goto fail;
}
ret = bdrv_pwrite_sync(bs->file, l2e_offset, l2_entry_size(s),
ret = bdrv_co_pwrite_sync(bs->file, l2e_offset, l2_entry_size(s),
&l2_table[idx], 0);
if (ret < 0) {
fprintf(stderr, "ERROR: Failed to overwrite L2 "
@ -1659,7 +1661,8 @@ fail:
* Returns the number of errors found by the checks or -errno if an internal
* error occurred.
*/
static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
static int coroutine_fn GRAPH_RDLOCK
check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size, int64_t l2_offset,
int flags, BdrvCheckMode fix, bool active)
@ -1673,7 +1676,7 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
bool metadata_overlap;
/* Read L2 table from disk */
ret = bdrv_pread(bs->file, l2_offset, l2_size_bytes, l2_table, 0);
ret = bdrv_co_pread(bs->file, l2_offset, l2_size_bytes, l2_table, 0);
if (ret < 0) {
fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
res->check_errors++;
@ -1858,10 +1861,9 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
* Returns the number of errors found by the checks or -errno if an internal
* error occurred.
*/
static int check_refcounts_l1(BlockDriverState *bs,
BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size,
static int coroutine_fn GRAPH_RDLOCK
check_refcounts_l1(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table, int64_t *refcount_table_size,
int64_t l1_table_offset, int l1_size,
int flags, BdrvCheckMode fix, bool active)
{
@ -1889,7 +1891,7 @@ static int check_refcounts_l1(BlockDriverState *bs,
}
/* Read L1 table entries from disk */
ret = bdrv_pread(bs->file, l1_table_offset, l1_size_bytes, l1_table, 0);
ret = bdrv_co_pread(bs->file, l1_table_offset, l1_size_bytes, l1_table, 0);
if (ret < 0) {
fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
res->check_errors++;
@ -1949,8 +1951,8 @@ static int check_refcounts_l1(BlockDriverState *bs,
* have already been detected and sufficiently signaled by the calling function
* (qcow2_check_refcounts) by the time this function is called).
*/
static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix)
static int coroutine_fn GRAPH_RDLOCK
check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
BDRVQcow2State *s = bs->opaque;
uint64_t *l2_table = qemu_blockalign(bs, s->cluster_size);
@ -2005,7 +2007,7 @@ static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
}
}
ret = bdrv_pread(bs->file, l2_offset, s->l2_size * l2_entry_size(s),
ret = bdrv_co_pread(bs->file, l2_offset, s->l2_size * l2_entry_size(s),
l2_table, 0);
if (ret < 0) {
fprintf(stderr, "ERROR: Could not read L2 table: %s\n",
@ -2059,8 +2061,7 @@ static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
goto fail;
}
ret = bdrv_pwrite(bs->file, l2_offset, s->cluster_size, l2_table,
0);
ret = bdrv_co_pwrite(bs->file, l2_offset, s->cluster_size, l2_table, 0);
if (ret < 0) {
fprintf(stderr, "ERROR: Could not write L2 table: %s\n",
strerror(-ret));
@ -2083,7 +2084,8 @@ fail:
* Checks consistency of refblocks and accounts for each refblock in
* *refcount_table.
*/
static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res,
static int coroutine_fn GRAPH_RDLOCK
check_refblocks(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix, bool *rebuild,
void **refcount_table, int64_t *nb_clusters)
{
@ -2127,13 +2129,13 @@ static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res,
goto resize_fail;
}
ret = bdrv_truncate(bs->file, offset + s->cluster_size, false,
ret = bdrv_co_truncate(bs->file, offset + s->cluster_size, false,
PREALLOC_MODE_OFF, 0, &local_err);
if (ret < 0) {
error_report_err(local_err);
goto resize_fail;
}
size = bdrv_getlength(bs->file->bs);
size = bdrv_co_getlength(bs->file->bs);
if (size < 0) {
ret = size;
goto resize_fail;
@ -2197,7 +2199,8 @@ resize_fail:
/*
* Calculates an in-memory refcount table.
*/
static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
static int coroutine_fn GRAPH_RDLOCK
calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix, bool *rebuild,
void **refcount_table, int64_t *nb_clusters)
{
@ -2299,7 +2302,8 @@ static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
* Compares the actual reference count for each cluster in the image against the
* refcount as reported by the refcount structures on-disk.
*/
static void compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
static void coroutine_fn
compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix, bool *rebuild,
int64_t *highest_cluster,
void *refcount_table, int64_t nb_clusters)
@ -2463,7 +2467,8 @@ static int64_t alloc_clusters_imrt(BlockDriverState *bs,
* Return whether the on-disk reftable array was resized (true/false),
* or -errno on error.
*/
static int rebuild_refcounts_write_refblocks(
static int coroutine_fn GRAPH_RDLOCK
rebuild_refcounts_write_refblocks(
BlockDriverState *bs, void **refcount_table, int64_t *nb_clusters,
int64_t first_cluster, int64_t end_cluster,
uint64_t **on_disk_reftable_ptr, uint32_t *on_disk_reftable_entries_ptr,
@ -2578,7 +2583,7 @@ static int rebuild_refcounts_write_refblocks(
on_disk_refblock = (void *)((char *) *refcount_table +
refblock_index * s->cluster_size);
ret = bdrv_pwrite(bs->file, refblock_offset, s->cluster_size,
ret = bdrv_co_pwrite(bs->file, refblock_offset, s->cluster_size,
on_disk_refblock, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "ERROR writing refblock");
@ -2601,10 +2606,9 @@ static int rebuild_refcounts_write_refblocks(
* On success, the old refcount structure is leaked (it will be covered by the
* new refcount structure).
*/
static int rebuild_refcount_structure(BlockDriverState *bs,
BdrvCheckResult *res,
void **refcount_table,
int64_t *nb_clusters,
static int coroutine_fn GRAPH_RDLOCK
rebuild_refcount_structure(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table, int64_t *nb_clusters,
Error **errp)
{
BDRVQcow2State *s = bs->opaque;
@ -2734,7 +2738,7 @@ static int rebuild_refcount_structure(BlockDriverState *bs,
}
assert(reftable_length < INT_MAX);
ret = bdrv_pwrite(bs->file, reftable_offset, reftable_length,
ret = bdrv_co_pwrite(bs->file, reftable_offset, reftable_length,
on_disk_reftable, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "ERROR writing reftable");
@ -2745,7 +2749,7 @@ static int rebuild_refcount_structure(BlockDriverState *bs,
reftable_offset_and_clusters.reftable_offset = cpu_to_be64(reftable_offset);
reftable_offset_and_clusters.reftable_clusters =
cpu_to_be32(reftable_clusters);
ret = bdrv_pwrite_sync(bs->file,
ret = bdrv_co_pwrite_sync(bs->file,
offsetof(QCowHeader, refcount_table_offset),
sizeof(reftable_offset_and_clusters),
&reftable_offset_and_clusters, 0);
@ -2777,8 +2781,8 @@ fail:
* Returns 0 if no errors are found, the number of errors in case the image is
* detected as corrupted, and -errno when an internal error occurred.
*/
int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix)
int coroutine_fn GRAPH_RDLOCK
qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
BDRVQcow2State *s = bs->opaque;
BdrvCheckResult pre_compare_res;
@ -2787,7 +2791,7 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
bool rebuild = false;
int ret;
size = bdrv_getlength(bs->file->bs);
size = bdrv_co_getlength(bs->file->bs);
if (size < 0) {
res->check_errors++;
return size;
@ -3541,7 +3545,8 @@ done:
return ret;
}
static int64_t get_refblock_offset(BlockDriverState *bs, uint64_t offset)
static int64_t coroutine_fn get_refblock_offset(BlockDriverState *bs,
uint64_t offset)
{
BDRVQcow2State *s = bs->opaque;
uint32_t index = offset_to_reftable_index(s, offset);
@ -3707,7 +3712,8 @@ int64_t coroutine_fn qcow2_get_last_cluster(BlockDriverState *bs, int64_t size)
return -EIO;
}
int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs)
int coroutine_fn GRAPH_RDLOCK
qcow2_detect_metadata_preallocation(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
int64_t i, end_cluster, cluster_count = 0, threshold;
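The hunks above all follow one conversion recipe: each helper on the qcow2 check path gains coroutine_fn plus GRAPH_RDLOCK, and every synchronous bdrv_pread()/bdrv_pwrite()/bdrv_getlength() becomes its bdrv_co_* counterpart, which may only be called from a coroutine that holds the block-graph read lock. A minimal sketch of the resulting shape (example_check_table and TABLE_BYTES are hypothetical, not from the patch):

/* Sketch: a check-path helper after conversion. GRAPH_RDLOCK is
 * enforced at compile time by clang's thread-safety analysis when
 * QEMU is built with it enabled. */
static int coroutine_fn GRAPH_RDLOCK
example_check_table(BlockDriverState *bs, uint64_t offset, void *table)
{
    /* bdrv_co_pread() may yield the coroutine instead of blocking */
    int ret = bdrv_co_pread(bs->file, offset, TABLE_BYTES, table, 0);
    return ret < 0 ? ret : 0;
}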
block/qcow2.c
@ -570,7 +570,7 @@ int qcow2_mark_corrupt(BlockDriverState *bs)
* Marks the image as consistent, i.e., unsets the corrupt bit, and flushes
* before if necessary.
*/
int qcow2_mark_consistent(BlockDriverState *bs)
static int coroutine_fn qcow2_mark_consistent(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
@ -2225,7 +2225,7 @@ qcow2_co_preadv_encrypted(BlockDriverState *bs,
return -ENOMEM;
}
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
ret = bdrv_co_pread(s->data_file, host_offset, bytes, buf, 0);
if (ret < 0) {
goto fail;
@ -2315,7 +2315,7 @@ qcow2_co_preadv_task(BlockDriverState *bs, QCow2SubclusterType subc_type,
case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
assert(bs->backing); /* otherwise handled in qcow2_co_preadv_part */
BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
return bdrv_co_preadv_part(bs->backing, offset, bytes,
qiov, qiov_offset, 0);
@ -2329,7 +2329,7 @@ qcow2_co_preadv_task(BlockDriverState *bs, QCow2SubclusterType subc_type,
offset, bytes, qiov, qiov_offset);
}
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
return bdrv_co_preadv_part(s->data_file, host_offset,
bytes, qiov, qiov_offset, 0);
@ -2539,7 +2539,7 @@ handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta)
return ret;
}
BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE);
BLKDBG_CO_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE);
ret = bdrv_co_pwrite_zeroes(s->data_file, start_offset, nb_bytes,
BDRV_REQ_NO_FALLBACK);
if (ret < 0) {
@ -2604,7 +2604,7 @@ int qcow2_co_pwritev_task(BlockDriverState *bs, uint64_t host_offset,
* guest data now.
*/
if (!merge_cow(offset, bytes, qiov, qiov_offset, l2meta)) {
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO);
trace_qcow2_writev_data(qemu_coroutine_self(), host_offset);
ret = bdrv_co_pwritev_part(s->data_file, host_offset,
bytes, qiov, qiov_offset, 0);
@ -4678,7 +4678,7 @@ qcow2_co_pwritev_compressed_task(BlockDriverState *bs,
goto fail;
}
BLKDBG_EVENT(s->data_file, BLKDBG_WRITE_COMPRESSED);
BLKDBG_CO_EVENT(s->data_file, BLKDBG_WRITE_COMPRESSED);
ret = bdrv_co_pwrite(s->data_file, cluster_offset, out_len, out_buf, 0);
if (ret < 0) {
goto fail;
@ -4797,7 +4797,7 @@ qcow2_co_preadv_compressed(BlockDriverState *bs,
out_buf = qemu_blockalign(bs, s->cluster_size);
BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
ret = bdrv_co_pread(bs->file, coffset, csize, buf, 0);
if (ret < 0) {
goto fail;
@ -5197,6 +5197,7 @@ qcow2_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
BDRVQcow2State *s = bs->opaque;
bdi->cluster_size = s->cluster_size;
bdi->subcluster_size = s->subcluster_size;
bdi->vm_state_offset = qcow2_vm_state_offset(s);
bdi->is_dirty = s->incompatible_features & QCOW2_INCOMPAT_DIRTY;
return 0;
@ -5344,7 +5345,7 @@ qcow2_co_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
return offset;
}
BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
BLKDBG_CO_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
return bs->drv->bdrv_co_pwritev_part(bs, offset, qiov->size, qiov, 0, 0);
}
@ -5356,7 +5357,7 @@ qcow2_co_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
return offset;
}
BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
BLKDBG_CO_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
return bs->drv->bdrv_co_preadv_part(bs, offset, qiov->size, qiov, 0, 0);
}
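The BLKDBG_EVENT -> BLKDBG_CO_EVENT swaps in this file all track the same rule: in coroutine context the blkdebug event should be raised through the coroutine variant of the hook rather than the mixed wrapper. The call pattern, as a sketch (my_co_read is illustrative, not from the patch):

/* Sketch: raising a blkdebug event before a coroutine read */
static int coroutine_fn GRAPH_RDLOCK
my_co_read(BlockDriverState *bs, int64_t offset, int64_t bytes,
           QEMUIOVector *qiov)
{
    BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
    return bdrv_co_preadv(bs->file, offset, bytes, qiov, 0);
}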
block/qcow2.h
@ -836,7 +836,6 @@ int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size,
int qcow2_mark_dirty(BlockDriverState *bs);
int qcow2_mark_corrupt(BlockDriverState *bs);
int qcow2_mark_consistent(BlockDriverState *bs);
int qcow2_update_header(BlockDriverState *bs);
void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
@ -867,7 +866,7 @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t offset,
int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size);
int64_t coroutine_fn qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
int64_t nb_clusters);
int64_t coroutine_fn qcow2_alloc_bytes(BlockDriverState *bs, int size);
int64_t coroutine_fn GRAPH_RDLOCK qcow2_alloc_bytes(BlockDriverState *bs, int size);
void qcow2_free_clusters(BlockDriverState *bs,
int64_t offset, int64_t size,
enum qcow2_discard_type type);
@ -879,7 +878,7 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
int qcow2_flush_caches(BlockDriverState *bs);
int qcow2_write_caches(BlockDriverState *bs);
int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
int coroutine_fn qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix);
void qcow2_process_discards(BlockDriverState *bs, int ret);
@ -888,7 +887,7 @@ int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
int64_t size);
int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
int64_t size, bool data_file);
int qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
int coroutine_fn qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size,
int64_t offset, int64_t size);
@ -919,10 +918,9 @@ int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
int coroutine_fn qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes,
uint64_t *host_offset, QCowL2Meta **m);
int coroutine_fn qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
uint64_t offset,
int compressed_size,
uint64_t *host_offset);
int coroutine_fn GRAPH_RDLOCK
qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, uint64_t offset,
int compressed_size, uint64_t *host_offset);
void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
uint64_t *coffset, int *csize);
@ -992,11 +990,12 @@ void *qcow2_cache_is_table_offset(Qcow2Cache *c, uint64_t offset);
void qcow2_cache_discard(Qcow2Cache *c, void *table);
/* qcow2-bitmap.c functions */
int qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
int coroutine_fn
qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size);
bool coroutine_fn qcow2_load_dirty_bitmaps(BlockDriverState *bs,
bool *header_updated, Error **errp);
bool coroutine_fn GRAPH_RDLOCK
qcow2_load_dirty_bitmaps(BlockDriverState *bs, bool *header_updated, Error **errp);
bool qcow2_get_bitmap_info_list(BlockDriverState *bs,
Qcow2BitmapInfoList **info_list, Error **errp);
int qcow2_reopen_bitmaps_rw(BlockDriverState *bs, Error **errp);
block/qed-check.c
@ -200,7 +200,8 @@ static void qed_check_for_leaks(QEDCheck *check)
/**
* Mark an image clean once it passes check or has been repaired
*/
static void qed_check_mark_clean(BDRVQEDState *s, BdrvCheckResult *result)
static void coroutine_fn GRAPH_RDLOCK
qed_check_mark_clean(BDRVQEDState *s, BdrvCheckResult *result)
{
/* Skip if there were unfixable corruptions or I/O errors */
if (result->corruptions > 0 || result->check_errors > 0) {
@ -213,7 +214,7 @@ static void qed_check_mark_clean(BDRVQEDState *s, BdrvCheckResult *result)
}
/* Ensure fixes reach storage before clearing check bit */
bdrv_flush(s->bs);
bdrv_co_flush(s->bs);
s->header.features &= ~QED_F_NEED_CHECK;
qed_write_header_sync(s);
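The ordering in qed_check_mark_clean() is deliberate: repairs are flushed before the needs-check bit is cleared, so a crash between the two steps leaves QED_F_NEED_CHECK set and the image is simply re-checked on the next open. Schematically:

/* Ordering in qed_check_mark_clean():
 *   1. bdrv_co_flush(s->bs);                    repairs reach storage
 *   2. s->header.features &= ~QED_F_NEED_CHECK; clear the dirty bit
 *   3. qed_write_header_sync(s);                persist the header
 */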
block/qed-table.c
@ -122,7 +122,7 @@ int coroutine_fn qed_read_l1_table_sync(BDRVQEDState *s)
int coroutine_fn qed_write_l1_table(BDRVQEDState *s, unsigned int index,
unsigned int n)
{
BLKDBG_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
BLKDBG_CO_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
return qed_write_table(s, s->header.l1_table_offset,
s->l1_table, index, n, false);
}
@ -150,7 +150,7 @@ int coroutine_fn qed_read_l2_table(BDRVQEDState *s, QEDRequest *request,
request->l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);
request->l2_table->table = qed_alloc_table(s);
BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD);
BLKDBG_CO_EVENT(s->bs->file, BLKDBG_L2_LOAD);
ret = qed_read_table(s, offset, request->l2_table->table);
if (ret) {
@ -183,7 +183,7 @@ int coroutine_fn qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
unsigned int index, unsigned int n,
bool flush)
{
BLKDBG_EVENT(s->bs->file, BLKDBG_L2_UPDATE);
BLKDBG_CO_EVENT(s->bs->file, BLKDBG_L2_UPDATE);
return qed_write_table(s, request->l2_table->offset,
request->l2_table->table, index, n, flush);
}
block/qed.c
@ -195,14 +195,15 @@ static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
*
* The string is NUL-terminated.
*/
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
char *buf, size_t buflen)
static int coroutine_fn GRAPH_RDLOCK
qed_read_string(BdrvChild *file, uint64_t offset,
size_t n, char *buf, size_t buflen)
{
int ret;
if (n >= buflen) {
return -EINVAL;
}
ret = bdrv_pread(file, offset, n, buf, 0);
ret = bdrv_co_pread(file, offset, n, buf, 0);
if (ret < 0) {
return ret;
}
@ -882,7 +883,7 @@ static int coroutine_fn GRAPH_RDLOCK
qed_read_backing_file(BDRVQEDState *s, uint64_t pos, QEMUIOVector *qiov)
{
if (s->bs->backing) {
BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
BLKDBG_CO_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
return bdrv_co_preadv(s->bs->backing, pos, qiov->size, qiov, 0);
}
qemu_iovec_memset(qiov, 0, 0, qiov->size);
@ -917,7 +918,7 @@ qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos, uint64_t len,
goto out;
}
BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
BLKDBG_CO_EVENT(s->bs->file, BLKDBG_COW_WRITE);
ret = bdrv_co_pwritev(s->bs->file, offset, qiov.size, &qiov, 0);
if (ret < 0) {
goto out;
@ -1069,7 +1070,7 @@ static int coroutine_fn GRAPH_RDLOCK qed_aio_write_main(QEDAIOCB *acb)
trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size);
BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
BLKDBG_CO_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
return bdrv_co_pwritev(s->bs->file, offset, acb->cur_qiov.size,
&acb->cur_qiov, 0);
}
@ -1323,7 +1324,7 @@ qed_aio_read_data(void *opaque, int ret, uint64_t offset, size_t len)
} else if (ret != QED_CLUSTER_FOUND) {
r = qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov);
} else {
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
r = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size,
&acb->cur_qiov, 0);
}
block/raw-format.c
@ -214,7 +214,7 @@ raw_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
return ret;
}
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
}
@ -268,7 +268,7 @@ raw_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
goto fail;
}
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO);
ret = bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
fail:
block/vhdx-log.c
@ -169,7 +169,8 @@ exit:
* It is assumed that 'buffer' is at least 4096*num_sectors large.
*
* 0 is returned on success, -errno otherwise */
static int vhdx_log_write_sectors(BlockDriverState *bs, VHDXLogEntries *log,
static int coroutine_fn GRAPH_RDLOCK
vhdx_log_write_sectors(BlockDriverState *bs, VHDXLogEntries *log,
uint32_t *sectors_written, void *buffer,
uint32_t num_sectors)
{
@ -195,8 +196,7 @@ static int vhdx_log_write_sectors(BlockDriverState *bs, VHDXLogEntries *log,
/* full */
break;
}
ret = bdrv_pwrite(bs->file, offset, VHDX_LOG_SECTOR_SIZE, buffer_tmp,
0);
ret = bdrv_co_pwrite(bs->file, offset, VHDX_LOG_SECTOR_SIZE, buffer_tmp, 0);
if (ret < 0) {
goto exit;
}
@ -853,7 +853,8 @@ static void vhdx_log_raw_to_le_sector(VHDXLogDescriptor *desc,
}
static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s,
static int coroutine_fn GRAPH_RDLOCK
vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s,
void *data, uint32_t length, uint64_t offset)
{
int ret = 0;
@ -924,7 +925,7 @@ static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s,
sectors += partial_sectors;
file_length = bdrv_getlength(bs->file->bs);
file_length = bdrv_co_getlength(bs->file->bs);
if (file_length < 0) {
ret = file_length;
goto exit;
@ -971,7 +972,7 @@ static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s,
if (i == 0 && leading_length) {
/* partial sector at the front of the buffer */
ret = bdrv_pread(bs->file, file_offset, VHDX_LOG_SECTOR_SIZE,
ret = bdrv_co_pread(bs->file, file_offset, VHDX_LOG_SECTOR_SIZE,
merged_sector, 0);
if (ret < 0) {
goto exit;
@ -981,7 +982,7 @@ static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s,
sector_write = merged_sector;
} else if (i == sectors - 1 && trailing_length) {
/* partial sector at the end of the buffer */
ret = bdrv_pread(bs->file, file_offset + trailing_length,
ret = bdrv_co_pread(bs->file, file_offset + trailing_length,
VHDX_LOG_SECTOR_SIZE - trailing_length,
merged_sector + trailing_length, 0);
if (ret < 0) {
@ -1036,7 +1037,8 @@ exit:
}
/* Perform a log write, and then immediately flush the entire log */
int vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s,
int coroutine_fn
vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s,
void *data, uint32_t length, uint64_t offset)
{
int ret = 0;
@ -1047,7 +1049,7 @@ int vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s,
/* Make sure data written (new and/or changed blocks) is stable
* on disk, before creating log entry */
ret = bdrv_flush(bs);
ret = bdrv_co_flush(bs);
if (ret < 0) {
goto exit;
}
@ -1059,7 +1061,7 @@ int vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s,
logs.log = s->log;
/* Make sure log is stable on disk */
ret = bdrv_flush(bs);
ret = bdrv_co_flush(bs);
if (ret < 0) {
goto exit;
}
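The two bdrv_co_flush() calls bracket the actual log write, which is what makes the VHDX log crash-consistent: data blocks must be durable before the log entry that describes them, and the entry must be durable before the caller proceeds. Condensed sketch of the function above:

/* Condensed from vhdx_log_write_and_flush() */
ret = bdrv_co_flush(bs);                           /* 1: data stable */
if (ret < 0) { goto exit; }
ret = vhdx_log_write(bs, s, data, length, offset); /* 2: log entry  */
if (ret < 0) { goto exit; }
ret = bdrv_co_flush(bs);                           /* 3: log stable */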
block/vhdx.c
@ -1250,12 +1250,13 @@ exit:
*
* Returns the file offset start of the new payload block
*/
static int vhdx_allocate_block(BlockDriverState *bs, BDRVVHDXState *s,
static int coroutine_fn GRAPH_RDLOCK
vhdx_allocate_block(BlockDriverState *bs, BDRVVHDXState *s,
uint64_t *new_offset, bool *need_zero)
{
int64_t current_len;
current_len = bdrv_getlength(bs->file->bs);
current_len = bdrv_co_getlength(bs->file->bs);
if (current_len < 0) {
return current_len;
}
@ -1271,7 +1272,7 @@ static int vhdx_allocate_block(BlockDriverState *bs, BDRVVHDXState *s,
if (*need_zero) {
int ret;
ret = bdrv_truncate(bs->file, *new_offset + s->block_size, false,
ret = bdrv_co_truncate(bs->file, *new_offset + s->block_size, false,
PREALLOC_MODE_OFF, BDRV_REQ_ZERO_WRITE, NULL);
if (ret != -ENOTSUP) {
*need_zero = false;
@ -1279,7 +1280,7 @@ static int vhdx_allocate_block(BlockDriverState *bs, BDRVVHDXState *s,
}
}
return bdrv_truncate(bs->file, *new_offset + s->block_size, false,
return bdrv_co_truncate(bs->file, *new_offset + s->block_size, false,
PREALLOC_MODE_OFF, 0, NULL);
}
@ -1572,12 +1573,10 @@ exit:
* The first 64KB of the Metadata section is reserved for the metadata
* header and entries; beyond that, the metadata items themselves reside.
*/
static int vhdx_create_new_metadata(BlockBackend *blk,
uint64_t image_size,
uint32_t block_size,
uint32_t sector_size,
uint64_t metadata_offset,
VHDXImageType type)
static int coroutine_fn
vhdx_create_new_metadata(BlockBackend *blk, uint64_t image_size,
uint32_t block_size, uint32_t sector_size,
uint64_t metadata_offset, VHDXImageType type)
{
int ret = 0;
uint32_t offset = 0;
@ -1668,12 +1667,12 @@ static int vhdx_create_new_metadata(BlockBackend *blk,
VHDX_META_FLAGS_IS_VIRTUAL_DISK;
vhdx_metadata_entry_le_export(&md_table_entry[4]);
ret = blk_pwrite(blk, metadata_offset, VHDX_HEADER_BLOCK_SIZE, buffer, 0);
ret = blk_co_pwrite(blk, metadata_offset, VHDX_HEADER_BLOCK_SIZE, buffer, 0);
if (ret < 0) {
goto exit;
}
ret = blk_pwrite(blk, metadata_offset + (64 * KiB),
ret = blk_co_pwrite(blk, metadata_offset + (64 * KiB),
VHDX_METADATA_ENTRY_BUFFER_SIZE, entry_buffer, 0);
if (ret < 0) {
goto exit;
@ -1694,7 +1693,8 @@ exit:
* Fixed images: default state of the BAT is fully populated, with
* file offsets and state PAYLOAD_BLOCK_FULLY_PRESENT.
*/
static int vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s,
static int coroutine_fn
vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s,
uint64_t image_size, VHDXImageType type,
bool use_zero_blocks, uint64_t file_offset,
uint32_t length, Error **errp)
@ -1718,13 +1718,13 @@ static int vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s,
if (type == VHDX_TYPE_DYNAMIC) {
/* All zeroes, so we can just extend the file - the end of the BAT
* is the furthest thing we have written yet */
ret = blk_truncate(blk, data_file_offset, false, PREALLOC_MODE_OFF,
ret = blk_co_truncate(blk, data_file_offset, false, PREALLOC_MODE_OFF,
0, errp);
if (ret < 0) {
goto exit;
}
} else if (type == VHDX_TYPE_FIXED) {
ret = blk_truncate(blk, data_file_offset + image_size, false,
ret = blk_co_truncate(blk, data_file_offset + image_size, false,
PREALLOC_MODE_OFF, 0, errp);
if (ret < 0) {
goto exit;
@ -1759,7 +1759,7 @@ static int vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s,
s->bat[sinfo.bat_idx] = cpu_to_le64(s->bat[sinfo.bat_idx]);
sector_num += s->sectors_per_block;
}
ret = blk_pwrite(blk, file_offset, length, s->bat, 0);
ret = blk_co_pwrite(blk, file_offset, length, s->bat, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Failed to write the BAT");
goto exit;
@ -1780,14 +1780,11 @@ exit:
* to create the BAT itself, we will also cause the BAT to be
* created.
*/
static int vhdx_create_new_region_table(BlockBackend *blk,
uint64_t image_size,
uint32_t block_size,
uint32_t sector_size,
uint32_t log_size,
bool use_zero_blocks,
VHDXImageType type,
uint64_t *metadata_offset,
static int coroutine_fn
vhdx_create_new_region_table(BlockBackend *blk, uint64_t image_size,
uint32_t block_size, uint32_t sector_size,
uint32_t log_size, bool use_zero_blocks,
VHDXImageType type, uint64_t *metadata_offset,
Error **errp)
{
int ret = 0;
@ -1863,14 +1860,14 @@ static int vhdx_create_new_region_table(BlockBackend *blk,
}
/* Now write out the region headers to disk */
ret = blk_pwrite(blk, VHDX_REGION_TABLE_OFFSET, VHDX_HEADER_BLOCK_SIZE,
ret = blk_co_pwrite(blk, VHDX_REGION_TABLE_OFFSET, VHDX_HEADER_BLOCK_SIZE,
buffer, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Failed to write first region table");
goto exit;
}
ret = blk_pwrite(blk, VHDX_REGION_TABLE2_OFFSET, VHDX_HEADER_BLOCK_SIZE,
ret = blk_co_pwrite(blk, VHDX_REGION_TABLE2_OFFSET, VHDX_HEADER_BLOCK_SIZE,
buffer, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Failed to write second region table");
block/vhdx.h
@ -413,7 +413,8 @@ bool vhdx_checksum_is_valid(uint8_t *buf, size_t size, int crc_offset);
int vhdx_parse_log(BlockDriverState *bs, BDRVVHDXState *s, bool *flushed,
Error **errp);
int vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s,
int coroutine_fn GRAPH_RDLOCK
vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s,
void *data, uint32_t length, uint64_t offset);
static inline void leguid_to_cpus(MSGUID *guid)
block/vmdk.c
@ -339,7 +339,8 @@ out:
return ret;
}
static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid)
static int coroutine_fn GRAPH_RDLOCK
vmdk_write_cid(BlockDriverState *bs, uint32_t cid)
{
char *desc, *tmp_desc;
char *p_name, *tmp_str;
@ -348,7 +349,7 @@ static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid)
desc = g_malloc0(DESC_SIZE);
tmp_desc = g_malloc0(DESC_SIZE);
ret = bdrv_pread(bs->file, s->desc_offset, DESC_SIZE, desc, 0);
ret = bdrv_co_pread(bs->file, s->desc_offset, DESC_SIZE, desc, 0);
if (ret < 0) {
goto out;
}
@ -368,7 +369,7 @@ static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid)
pstrcat(desc, DESC_SIZE, tmp_desc);
}
ret = bdrv_pwrite_sync(bs->file, s->desc_offset, DESC_SIZE, desc, 0);
ret = bdrv_co_pwrite_sync(bs->file, s->desc_offset, DESC_SIZE, desc, 0);
out:
g_free(desc);
@ -1437,7 +1438,7 @@ get_whole_cluster(BlockDriverState *bs, VmdkExtent *extent,
if (skip_start_bytes > 0) {
if (copy_from_backing) {
/* qcow2 emits this on bs->file instead of bs->backing */
BLKDBG_EVENT(extent->file, BLKDBG_COW_READ);
BLKDBG_CO_EVENT(extent->file, BLKDBG_COW_READ);
ret = bdrv_co_pread(bs->backing, offset, skip_start_bytes,
whole_grain, 0);
if (ret < 0) {
@ -1445,7 +1446,7 @@ get_whole_cluster(BlockDriverState *bs, VmdkExtent *extent,
goto exit;
}
}
BLKDBG_EVENT(extent->file, BLKDBG_COW_WRITE);
BLKDBG_CO_EVENT(extent->file, BLKDBG_COW_WRITE);
ret = bdrv_co_pwrite(extent->file, cluster_offset, skip_start_bytes,
whole_grain, 0);
if (ret < 0) {
@ -1457,7 +1458,7 @@ get_whole_cluster(BlockDriverState *bs, VmdkExtent *extent,
if (skip_end_bytes < cluster_bytes) {
if (copy_from_backing) {
/* qcow2 emits this on bs->file instead of bs->backing */
BLKDBG_EVENT(extent->file, BLKDBG_COW_READ);
BLKDBG_CO_EVENT(extent->file, BLKDBG_COW_READ);
ret = bdrv_co_pread(bs->backing, offset + skip_end_bytes,
cluster_bytes - skip_end_bytes,
whole_grain + skip_end_bytes, 0);
@ -1466,7 +1467,7 @@ get_whole_cluster(BlockDriverState *bs, VmdkExtent *extent,
goto exit;
}
}
BLKDBG_EVENT(extent->file, BLKDBG_COW_WRITE);
BLKDBG_CO_EVENT(extent->file, BLKDBG_COW_WRITE);
ret = bdrv_co_pwrite(extent->file, cluster_offset + skip_end_bytes,
cluster_bytes - skip_end_bytes,
whole_grain + skip_end_bytes, 0);
@ -1487,7 +1488,7 @@ vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data, uint32_t offset)
{
offset = cpu_to_le32(offset);
/* update L2 table */
BLKDBG_EVENT(extent->file, BLKDBG_L2_UPDATE);
BLKDBG_CO_EVENT(extent->file, BLKDBG_L2_UPDATE);
if (bdrv_co_pwrite(extent->file,
((int64_t)m_data->l2_offset * 512)
+ (m_data->l2_index * sizeof(offset)),
@ -1617,7 +1618,7 @@ get_cluster_offset(BlockDriverState *bs, VmdkExtent *extent,
}
}
l2_table = (char *)extent->l2_cache + (min_index * l2_size_bytes);
BLKDBG_EVENT(extent->file, BLKDBG_L2_LOAD);
BLKDBG_CO_EVENT(extent->file, BLKDBG_L2_LOAD);
if (bdrv_co_pread(extent->file,
(int64_t)l2_offset * 512,
l2_size_bytes,
@ -1828,12 +1829,12 @@ vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
n_bytes = buf_len + sizeof(VmdkGrainMarker);
qemu_iovec_init_buf(&local_qiov, data, n_bytes);
BLKDBG_EVENT(extent->file, BLKDBG_WRITE_COMPRESSED);
BLKDBG_CO_EVENT(extent->file, BLKDBG_WRITE_COMPRESSED);
} else {
qemu_iovec_init(&local_qiov, qiov->niov);
qemu_iovec_concat(&local_qiov, qiov, qiov_offset, n_bytes);
BLKDBG_EVENT(extent->file, BLKDBG_WRITE_AIO);
BLKDBG_CO_EVENT(extent->file, BLKDBG_WRITE_AIO);
}
write_offset = cluster_offset + offset_in_cluster;
@ -1875,7 +1876,7 @@ vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
if (!extent->compressed) {
BLKDBG_EVENT(extent->file, BLKDBG_READ_AIO);
BLKDBG_CO_EVENT(extent->file, BLKDBG_READ_AIO);
ret = bdrv_co_preadv(extent->file,
cluster_offset + offset_in_cluster, bytes,
qiov, 0);
@ -1889,7 +1890,7 @@ vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
buf_bytes = cluster_bytes * 2;
cluster_buf = g_malloc(buf_bytes);
uncomp_buf = g_malloc(cluster_bytes);
BLKDBG_EVENT(extent->file, BLKDBG_READ_COMPRESSED);
BLKDBG_CO_EVENT(extent->file, BLKDBG_READ_COMPRESSED);
ret = bdrv_co_pread(extent->file, cluster_offset, buf_bytes, cluster_buf,
0);
if (ret < 0) {
@ -1967,7 +1968,7 @@ vmdk_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes);
/* qcow2 emits this on bs->file instead of bs->backing */
BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
ret = bdrv_co_preadv(bs->backing, offset, n_bytes,
&local_qiov, 0);
if (ret < 0) {
@ -2131,7 +2132,7 @@ vmdk_co_pwritev_compressed(BlockDriverState *bs, int64_t offset, int64_t bytes,
int64_t length;
for (i = 0; i < s->num_extents; i++) {
length = bdrv_getlength(s->extents[i].file->bs);
length = bdrv_co_getlength(s->extents[i].file->bs);
if (length < 0) {
return length;
}
@ -2165,7 +2166,7 @@ vmdk_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
return ret;
}
static int GRAPH_UNLOCKED
static int coroutine_fn GRAPH_UNLOCKED
vmdk_init_extent(BlockBackend *blk, int64_t filesize, bool flat, bool compress,
bool zeroed_grain, Error **errp)
{
@ -2176,7 +2177,7 @@ vmdk_init_extent(BlockBackend *blk, int64_t filesize, bool flat, bool compress,
int gd_buf_size;
if (flat) {
ret = blk_truncate(blk, filesize, false, PREALLOC_MODE_OFF, 0, errp);
ret = blk_co_truncate(blk, filesize, false, PREALLOC_MODE_OFF, 0, errp);
goto exit;
}
magic = cpu_to_be32(VMDK4_MAGIC);
@ -2228,18 +2229,18 @@ vmdk_init_extent(BlockBackend *blk, int64_t filesize, bool flat, bool compress,
header.check_bytes[3] = 0xa;
/* write all the data */
ret = blk_pwrite(blk, 0, sizeof(magic), &magic, 0);
ret = blk_co_pwrite(blk, 0, sizeof(magic), &magic, 0);
if (ret < 0) {
error_setg(errp, QERR_IO_ERROR);
goto exit;
}
ret = blk_pwrite(blk, sizeof(magic), sizeof(header), &header, 0);
ret = blk_co_pwrite(blk, sizeof(magic), sizeof(header), &header, 0);
if (ret < 0) {
error_setg(errp, QERR_IO_ERROR);
goto exit;
}
ret = blk_truncate(blk, le64_to_cpu(header.grain_offset) << 9, false,
ret = blk_co_truncate(blk, le64_to_cpu(header.grain_offset) << 9, false,
PREALLOC_MODE_OFF, 0, errp);
if (ret < 0) {
goto exit;
@ -2252,7 +2253,7 @@ vmdk_init_extent(BlockBackend *blk, int64_t filesize, bool flat, bool compress,
i < gt_count; i++, tmp += gt_size) {
gd_buf[i] = cpu_to_le32(tmp);
}
ret = blk_pwrite(blk, le64_to_cpu(header.rgd_offset) * BDRV_SECTOR_SIZE,
ret = blk_co_pwrite(blk, le64_to_cpu(header.rgd_offset) * BDRV_SECTOR_SIZE,
gd_buf_size, gd_buf, 0);
if (ret < 0) {
error_setg(errp, QERR_IO_ERROR);
@ -2264,7 +2265,7 @@ vmdk_init_extent(BlockBackend *blk, int64_t filesize, bool flat, bool compress,
i < gt_count; i++, tmp += gt_size) {
gd_buf[i] = cpu_to_le32(tmp);
}
ret = blk_pwrite(blk, le64_to_cpu(header.gd_offset) * BDRV_SECTOR_SIZE,
ret = blk_co_pwrite(blk, le64_to_cpu(header.gd_offset) * BDRV_SECTOR_SIZE,
gd_buf_size, gd_buf, 0);
if (ret < 0) {
error_setg(errp, QERR_IO_ERROR);
@ -2908,7 +2909,7 @@ vmdk_co_check(BlockDriverState *bs, BdrvCheckResult *result, BdrvCheckMode fix)
BDRVVmdkState *s = bs->opaque;
VmdkExtent *extent = NULL;
int64_t sector_num = 0;
int64_t total_sectors = bdrv_nb_sectors(bs);
int64_t total_sectors = bdrv_co_nb_sectors(bs);
int ret;
uint64_t cluster_offset;
@ -2938,7 +2939,7 @@ vmdk_co_check(BlockDriverState *bs, BdrvCheckResult *result, BdrvCheckMode fix)
break;
}
if (ret == VMDK_OK) {
int64_t extent_len = bdrv_getlength(extent->file->bs);
int64_t extent_len = bdrv_co_getlength(extent->file->bs);
if (extent_len < 0) {
fprintf(stderr,
"ERROR: could not get extent file length for sector %"
block/vpc.c
@ -486,8 +486,8 @@ static int vpc_reopen_prepare(BDRVReopenState *state,
* operation (the block bitmaps is updated then), 0 otherwise.
* If write is true then err must not be NULL.
*/
static inline int64_t get_image_offset(BlockDriverState *bs, uint64_t offset,
bool write, int *err)
static int64_t coroutine_fn GRAPH_RDLOCK
get_image_offset(BlockDriverState *bs, uint64_t offset, bool write, int *err)
{
BDRVVPCState *s = bs->opaque;
uint64_t bitmap_offset, block_offset;
@ -515,8 +515,7 @@ static inline int64_t get_image_offset(BlockDriverState *bs, uint64_t offset,
s->last_bitmap_offset = bitmap_offset;
memset(bitmap, 0xff, s->bitmap_size);
r = bdrv_pwrite_sync(bs->file, bitmap_offset, s->bitmap_size, bitmap,
0);
r = bdrv_co_pwrite_sync(bs->file, bitmap_offset, s->bitmap_size, bitmap, 0);
if (r < 0) {
*err = r;
return -2;
@ -532,13 +531,13 @@ static inline int64_t get_image_offset(BlockDriverState *bs, uint64_t offset,
*
* Returns 0 on success and < 0 on error
*/
static int rewrite_footer(BlockDriverState *bs)
static int coroutine_fn GRAPH_RDLOCK rewrite_footer(BlockDriverState *bs)
{
int ret;
BDRVVPCState *s = bs->opaque;
int64_t offset = s->free_data_block_offset;
ret = bdrv_pwrite_sync(bs->file, offset, sizeof(s->footer), &s->footer, 0);
ret = bdrv_co_pwrite_sync(bs->file, offset, sizeof(s->footer), &s->footer, 0);
if (ret < 0)
return ret;
@ -552,7 +551,8 @@ static int rewrite_footer(BlockDriverState *bs)
*
* Returns the sectors' offset in the image file on success and < 0 on error
*/
static int64_t alloc_block(BlockDriverState *bs, int64_t offset)
static int64_t coroutine_fn GRAPH_RDLOCK
alloc_block(BlockDriverState *bs, int64_t offset)
{
BDRVVPCState *s = bs->opaque;
int64_t bat_offset;
@ -572,7 +572,7 @@ static int64_t alloc_block(BlockDriverState *bs, int64_t offset)
/* Initialize the block's bitmap */
memset(bitmap, 0xff, s->bitmap_size);
ret = bdrv_pwrite_sync(bs->file, s->free_data_block_offset,
ret = bdrv_co_pwrite_sync(bs->file, s->free_data_block_offset,
s->bitmap_size, bitmap, 0);
if (ret < 0) {
return ret;
@ -587,7 +587,7 @@ static int64_t alloc_block(BlockDriverState *bs, int64_t offset)
/* Write BAT entry to disk */
bat_offset = s->bat_offset + (4 * index);
bat_value = cpu_to_be32(s->pagetable[index]);
ret = bdrv_pwrite_sync(bs->file, bat_offset, 4, &bat_value, 0);
ret = bdrv_co_pwrite_sync(bs->file, bat_offset, 4, &bat_value, 0);
if (ret < 0)
goto fail;
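VHD keeps its BAT entries big-endian on disk, hence the cpu_to_be32() before the 4-byte synchronous write. Reading an entry back inverts the swap; a sketch (read_bat_entry is a hypothetical helper, not in the patch):

/* Sketch: loading one BAT entry back into CPU byte order */
static int coroutine_fn GRAPH_RDLOCK
read_bat_entry(BlockDriverState *bs, BDRVVPCState *s, uint32_t index,
               uint32_t *value)
{
    uint32_t be;
    int ret = bdrv_co_pread(bs->file, s->bat_offset + 4 * (uint64_t)index,
                            4, &be, 0);
    if (ret < 0) {
        return ret;
    }
    *value = be32_to_cpu(be);
    return 0;
}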
@ -718,8 +718,8 @@ fail:
return ret;
}
static int coroutine_fn vpc_co_block_status(BlockDriverState *bs,
bool want_zero,
static int coroutine_fn GRAPH_RDLOCK
vpc_co_block_status(BlockDriverState *bs, bool want_zero,
int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map,
BlockDriverState **file)
@ -820,7 +820,7 @@ static int calculate_geometry(int64_t total_sectors, uint16_t *cyls,
return 0;
}
static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer,
static int coroutine_fn create_dynamic_disk(BlockBackend *blk, VHDFooter *footer,
int64_t total_sectors)
{
VHDDynDiskHeader dyndisk_header;
@ -834,13 +834,13 @@ static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer,
block_size = 0x200000;
num_bat_entries = DIV_ROUND_UP(total_sectors, block_size / 512);
ret = blk_pwrite(blk, offset, sizeof(*footer), footer, 0);
ret = blk_co_pwrite(blk, offset, sizeof(*footer), footer, 0);
if (ret < 0) {
goto fail;
}
offset = 1536 + ((num_bat_entries * 4 + 511) & ~511);
ret = blk_pwrite(blk, offset, sizeof(*footer), footer, 0);
ret = blk_co_pwrite(blk, offset, sizeof(*footer), footer, 0);
if (ret < 0) {
goto fail;
}
@ -850,7 +850,7 @@ static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer,
memset(bat_sector, 0xFF, 512);
for (i = 0; i < DIV_ROUND_UP(num_bat_entries * 4, 512); i++) {
ret = blk_pwrite(blk, offset, 512, bat_sector, 0);
ret = blk_co_pwrite(blk, offset, 512, bat_sector, 0);
if (ret < 0) {
goto fail;
}
@ -878,7 +878,7 @@ static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer,
/* Write the header */
offset = 512;
ret = blk_pwrite(blk, offset, sizeof(dyndisk_header), &dyndisk_header, 0);
ret = blk_co_pwrite(blk, offset, sizeof(dyndisk_header), &dyndisk_header, 0);
if (ret < 0) {
goto fail;
}
@ -888,7 +888,7 @@ static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer,
return ret;
}
static int create_fixed_disk(BlockBackend *blk, VHDFooter *footer,
static int coroutine_fn create_fixed_disk(BlockBackend *blk, VHDFooter *footer,
int64_t total_size, Error **errp)
{
int ret;
@ -896,12 +896,12 @@ static int create_fixed_disk(BlockBackend *blk, VHDFooter *footer,
/* Add footer to total size */
total_size += sizeof(*footer);
ret = blk_truncate(blk, total_size, false, PREALLOC_MODE_OFF, 0, errp);
ret = blk_co_truncate(blk, total_size, false, PREALLOC_MODE_OFF, 0, errp);
if (ret < 0) {
return ret;
}
ret = blk_pwrite(blk, total_size - sizeof(*footer), sizeof(*footer),
ret = blk_co_pwrite(blk, total_size - sizeof(*footer), sizeof(*footer),
footer, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Unable to write VHD header");
blockjob.c
@ -230,21 +230,28 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
uint64_t perm, uint64_t shared_perm, Error **errp)
{
BdrvChild *c;
AioContext *ctx = bdrv_get_aio_context(bs);
bool need_context_ops;
GLOBAL_STATE_CODE();
bdrv_ref(bs);
need_context_ops = bdrv_get_aio_context(bs) != job->job.aio_context;
need_context_ops = ctx != job->job.aio_context;
if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) {
if (need_context_ops) {
if (job->job.aio_context != qemu_get_aio_context()) {
aio_context_release(job->job.aio_context);
}
aio_context_acquire(ctx);
}
c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job,
errp);
if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) {
if (need_context_ops) {
aio_context_release(ctx);
if (job->job.aio_context != qemu_get_aio_context()) {
aio_context_acquire(job->job.aio_context);
}
}
if (c == NULL) {
return -EPERM;
}
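The rework changes when the AioContext locks move. Previously both the release of the job's context and the acquire of ctx were skipped whenever the job ran in the main context; now ctx (the context of bs) is always acquired around bdrv_root_attach_child(), and only the release/re-acquire of the job's own context stays conditional on it not being the main context. As a schematic:

/* Lock movement in block_job_add_bdrv() after this change:
 *
 *   if (ctx != job->job.aio_context) {
 *       if (job ctx is not the main context)
 *           release job ctx;
 *       acquire ctx;                  // bs's context, unconditionally
 *   }
 *   attach child (requires ctx to be held);
 *   if (ctx != job->job.aio_context) {
 *       release ctx;
 *       if (job ctx is not the main context)
 *           re-acquire job ctx;
 *   }
 */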
bsd-user/arm/target_arch_elf.h
@ -20,7 +20,6 @@
#ifndef TARGET_ARCH_ELF_H
#define TARGET_ARCH_ELF_H
#define ELF_START_MMAP 0x80000000
#define ELF_ET_DYN_LOAD_ADDR 0x500000
#define elf_check_arch(x) ((x) == EM_ARM)
bsd-user/elfload.c
@ -738,8 +738,6 @@ int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs,
/* OK, This is the point of no return */
info->end_data = 0;
info->end_code = 0;
info->start_mmap = (abi_ulong)ELF_START_MMAP;
info->mmap = 0;
elf_entry = (abi_ulong) elf_ex.e_entry;
/* XXX Join this with PT_INTERP search? */
@ -813,7 +811,7 @@ int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs,
bprm->stringp, &elf_ex, load_addr,
et_dyn_addr, interp_load_addr, info);
info->load_addr = reloc_func_desc;
info->start_brk = info->brk = elf_brk;
info->brk = elf_brk;
info->start_stack = bprm->p;
info->load_bias = 0;
bsd-user/freebsd/meson.build
@ -1,4 +1,5 @@
bsd_user_ss.add(files(
'os-stat.c',
'os-sys.c',
'os-syscall.c',
))
bsd-user/freebsd/os-stat.c (new file, 262 lines)
@ -0,0 +1,262 @@
/*
* FreeBSD stat related conversion routines
*
* Copyright (c) 2013 Stacey D. Son
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu.h"
/*
* stat conversion
*/
abi_long h2t_freebsd11_stat(abi_ulong target_addr,
struct freebsd11_stat *host_st)
{
struct target_freebsd11_stat *target_st;
if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) {
return -TARGET_EFAULT;
}
memset(target_st, 0, sizeof(*target_st));
__put_user(host_st->st_dev, &target_st->st_dev);
__put_user(host_st->st_ino, &target_st->st_ino);
__put_user(host_st->st_mode, &target_st->st_mode);
__put_user(host_st->st_nlink, &target_st->st_nlink);
__put_user(host_st->st_uid, &target_st->st_uid);
__put_user(host_st->st_gid, &target_st->st_gid);
__put_user(host_st->st_rdev, &target_st->st_rdev);
__put_user(host_st->st_atim.tv_sec, &target_st->st_atim.tv_sec);
__put_user(host_st->st_atim.tv_nsec, &target_st->st_atim.tv_nsec);
__put_user(host_st->st_mtim.tv_sec, &target_st->st_mtim.tv_sec);
__put_user(host_st->st_mtim.tv_nsec, &target_st->st_mtim.tv_nsec);
__put_user(host_st->st_ctim.tv_sec, &target_st->st_ctim.tv_sec);
__put_user(host_st->st_ctim.tv_nsec, &target_st->st_ctim.tv_nsec);
__put_user(host_st->st_size, &target_st->st_size);
__put_user(host_st->st_blocks, &target_st->st_blocks);
__put_user(host_st->st_blksize, &target_st->st_blksize);
__put_user(host_st->st_flags, &target_st->st_flags);
__put_user(host_st->st_gen, &target_st->st_gen);
/* st_lspare not used */
__put_user(host_st->st_birthtim.tv_sec, &target_st->st_birthtim.tv_sec);
__put_user(host_st->st_birthtim.tv_nsec, &target_st->st_birthtim.tv_nsec);
unlock_user_struct(target_st, target_addr, 1);
return 0;
}
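Each h2t_*() conversion in this file follows the same marshalling idiom: lock_user_struct(VERIFY_WRITE, ...) validates and maps the guest address, memset() zeroes the target struct so host padding never leaks to the guest, __put_user() stores each field in target byte order, and unlock_user_struct(..., 1) copies the result back. Reduced to a skeleton (struct target_foo and foo_t are hypothetical):

abi_long h2t_foo(abi_ulong target_addr, const foo_t *host)
{
    struct target_foo *target;

    if (!lock_user_struct(VERIFY_WRITE, target, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target, 0, sizeof(*target));         /* no host padding leaks */
    __put_user(host->value, &target->value);    /* target byte order */
    unlock_user_struct(target, target_addr, 1); /* 1 = write back to guest */
    return 0;
}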
abi_long h2t_freebsd_stat(abi_ulong target_addr,
struct stat *host_st)
{
struct target_stat *target_st;
if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) {
return -TARGET_EFAULT;
}
memset(target_st, 0, sizeof(*target_st));
__put_user(host_st->st_dev, &target_st->st_dev);
__put_user(host_st->st_ino, &target_st->st_ino);
__put_user(host_st->st_nlink, &target_st->st_nlink);
__put_user(host_st->st_mode, &target_st->st_mode);
__put_user(host_st->st_uid, &target_st->st_uid);
__put_user(host_st->st_gid, &target_st->st_gid);
__put_user(host_st->st_rdev, &target_st->st_rdev);
__put_user(host_st->st_atim.tv_sec, &target_st->st_atim.tv_sec);
__put_user(host_st->st_atim.tv_nsec, &target_st->st_atim.tv_nsec);
#ifdef TARGET_HAS_STAT_TIME_T_EXT
/* __put_user(host_st->st_mtim_ext, &target_st->st_mtim_ext); XXX */
#endif
__put_user(host_st->st_mtim.tv_sec, &target_st->st_mtim.tv_sec);
__put_user(host_st->st_mtim.tv_nsec, &target_st->st_mtim.tv_nsec);
#ifdef TARGET_HAS_STAT_TIME_T_EXT
/* __put_user(host_st->st_ctim_ext, &target_st->st_ctim_ext); XXX */
#endif
__put_user(host_st->st_ctim.tv_sec, &target_st->st_ctim.tv_sec);
__put_user(host_st->st_ctim.tv_nsec, &target_st->st_ctim.tv_nsec);
#ifdef TARGET_HAS_STAT_TIME_T_EXT
/* __put_user(host_st->st_birthtim_ext, &target_st->st_birthtim_ext); XXX */
#endif
__put_user(host_st->st_birthtim.tv_sec, &target_st->st_birthtim.tv_sec);
__put_user(host_st->st_birthtim.tv_nsec, &target_st->st_birthtim.tv_nsec);
__put_user(host_st->st_size, &target_st->st_size);
__put_user(host_st->st_blocks, &target_st->st_blocks);
__put_user(host_st->st_blksize, &target_st->st_blksize);
__put_user(host_st->st_flags, &target_st->st_flags);
__put_user(host_st->st_gen, &target_st->st_gen);
unlock_user_struct(target_st, target_addr, 1);
return 0;
}
abi_long h2t_freebsd11_nstat(abi_ulong target_addr,
struct freebsd11_stat *host_st)
{
struct target_freebsd11_nstat *target_st;
if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) {
return -TARGET_EFAULT;
}
memset(target_st, 0, sizeof(*target_st));
__put_user(host_st->st_dev, &target_st->st_dev);
__put_user(host_st->st_ino, &target_st->st_ino);
__put_user(host_st->st_mode, &target_st->st_mode);
__put_user(host_st->st_nlink, &target_st->st_nlink);
__put_user(host_st->st_uid, &target_st->st_uid);
__put_user(host_st->st_gid, &target_st->st_gid);
__put_user(host_st->st_rdev, &target_st->st_rdev);
__put_user(host_st->st_atim.tv_sec, &target_st->st_atim.tv_sec);
__put_user(host_st->st_atim.tv_nsec, &target_st->st_atim.tv_nsec);
__put_user(host_st->st_mtim.tv_sec, &target_st->st_mtim.tv_sec);
__put_user(host_st->st_mtim.tv_nsec, &target_st->st_mtim.tv_nsec);
__put_user(host_st->st_ctim.tv_sec, &target_st->st_ctim.tv_sec);
__put_user(host_st->st_ctim.tv_nsec, &target_st->st_ctim.tv_nsec);
__put_user(host_st->st_size, &target_st->st_size);
__put_user(host_st->st_blocks, &target_st->st_blocks);
__put_user(host_st->st_blksize, &target_st->st_blksize);
__put_user(host_st->st_flags, &target_st->st_flags);
__put_user(host_st->st_gen, &target_st->st_gen);
__put_user(host_st->st_birthtim.tv_sec, &target_st->st_birthtim.tv_sec);
__put_user(host_st->st_birthtim.tv_nsec, &target_st->st_birthtim.tv_nsec);
unlock_user_struct(target_st, target_addr, 1);
return 0;
}
/*
* file handle conversion
*/
abi_long t2h_freebsd_fhandle(fhandle_t *host_fh, abi_ulong target_addr)
{
target_freebsd_fhandle_t *target_fh;
if (!lock_user_struct(VERIFY_READ, target_fh, target_addr, 1)) {
return -TARGET_EFAULT;
}
__get_user(host_fh->fh_fsid.val[0], &target_fh->fh_fsid.val[0]);
__get_user(host_fh->fh_fsid.val[1], &target_fh->fh_fsid.val[1]);
__get_user(host_fh->fh_fid.fid_len, &target_fh->fh_fid.fid_len);
/* u_short fid_data0; */
memcpy(host_fh->fh_fid.fid_data, target_fh->fh_fid.fid_data,
TARGET_MAXFIDSZ);
unlock_user_struct(target_fh, target_addr, 0);
return 0;
}
abi_long h2t_freebsd_fhandle(abi_ulong target_addr, fhandle_t *host_fh)
{
target_freebsd_fhandle_t *target_fh;
if (!lock_user_struct(VERIFY_WRITE, target_fh, target_addr, 0)) {
return -TARGET_EFAULT;
}
__put_user(host_fh->fh_fsid.val[0], &target_fh->fh_fsid.val[0]);
__put_user(host_fh->fh_fsid.val[1], &target_fh->fh_fsid.val[1]);
__put_user(host_fh->fh_fid.fid_len, &target_fh->fh_fid.fid_len);
/* u_short fid_data0; */
memcpy(target_fh->fh_fid.fid_data, host_fh->fh_fid.fid_data,
TARGET_MAXFIDSZ);
unlock_user_struct(target_fh, target_addr, 1);
return 0;
}
/*
* file system stat
*/
abi_long h2t_freebsd11_statfs(abi_ulong target_addr,
struct freebsd11_statfs *host_statfs)
{
struct target_freebsd11_statfs *target_statfs;
if (!lock_user_struct(VERIFY_WRITE, target_statfs, target_addr, 0)) {
return -TARGET_EFAULT;
}
__put_user(host_statfs->f_version, &target_statfs->f_version);
__put_user(host_statfs->f_type, &target_statfs->f_type);
__put_user(host_statfs->f_flags, &target_statfs->f_flags);
__put_user(host_statfs->f_bsize, &target_statfs->f_bsize);
__put_user(host_statfs->f_iosize, &target_statfs->f_iosize);
__put_user(host_statfs->f_blocks, &target_statfs->f_blocks);
__put_user(host_statfs->f_bfree, &target_statfs->f_bfree);
__put_user(host_statfs->f_bavail, &target_statfs->f_bavail);
__put_user(host_statfs->f_files, &target_statfs->f_files);
__put_user(host_statfs->f_ffree, &target_statfs->f_ffree);
__put_user(host_statfs->f_syncwrites, &target_statfs->f_syncwrites);
__put_user(host_statfs->f_asyncwrites, &target_statfs->f_asyncwrites);
__put_user(host_statfs->f_syncreads, &target_statfs->f_syncreads);
__put_user(host_statfs->f_asyncreads, &target_statfs->f_asyncreads);
/* uint64_t f_spare[10]; */
__put_user(host_statfs->f_namemax, &target_statfs->f_namemax);
__put_user(host_statfs->f_owner, &target_statfs->f_owner);
__put_user(host_statfs->f_fsid.val[0], &target_statfs->f_fsid.val[0]);
__put_user(host_statfs->f_fsid.val[1], &target_statfs->f_fsid.val[1]);
/* char f_charspace[80]; */
strncpy(target_statfs->f_fstypename, host_statfs->f_fstypename,
sizeof(target_statfs->f_fstypename));
strncpy(target_statfs->f_mntfromname, host_statfs->f_mntfromname,
sizeof(target_statfs->f_mntfromname));
strncpy(target_statfs->f_mntonname, host_statfs->f_mntonname,
sizeof(target_statfs->f_mntonname));
unlock_user_struct(target_statfs, target_addr, 1);
return 0;
}
abi_long h2t_freebsd_statfs(abi_ulong target_addr,
struct statfs *host_statfs)
{
struct target_statfs *target_statfs;
if (!lock_user_struct(VERIFY_WRITE, target_statfs, target_addr, 0)) {
return -TARGET_EFAULT;
}
__put_user(host_statfs->f_version, &target_statfs->f_version);
__put_user(host_statfs->f_type, &target_statfs->f_type);
__put_user(host_statfs->f_flags, &target_statfs->f_flags);
__put_user(host_statfs->f_bsize, &target_statfs->f_bsize);
__put_user(host_statfs->f_iosize, &target_statfs->f_iosize);
__put_user(host_statfs->f_blocks, &target_statfs->f_blocks);
__put_user(host_statfs->f_bfree, &target_statfs->f_bfree);
__put_user(host_statfs->f_bavail, &target_statfs->f_bavail);
__put_user(host_statfs->f_files, &target_statfs->f_files);
__put_user(host_statfs->f_ffree, &target_statfs->f_ffree);
__put_user(host_statfs->f_syncwrites, &target_statfs->f_syncwrites);
__put_user(host_statfs->f_asyncwrites, &target_statfs->f_asyncwrites);
__put_user(host_statfs->f_syncreads, &target_statfs->f_syncreads);
__put_user(host_statfs->f_asyncreads, &target_statfs->f_asyncreads);
/* uint64_t f_spare[10]; */
__put_user(host_statfs->f_namemax, &target_statfs->f_namemax);
__put_user(host_statfs->f_owner, &target_statfs->f_owner);
__put_user(host_statfs->f_fsid.val[0], &target_statfs->f_fsid.val[0]);
__put_user(host_statfs->f_fsid.val[1], &target_statfs->f_fsid.val[1]);
/* char f_charspace[80]; */
strncpy(target_statfs->f_fstypename, host_statfs->f_fstypename,
sizeof(target_statfs->f_fstypename));
strncpy(target_statfs->f_mntfromname, host_statfs->f_mntfromname,
sizeof(target_statfs->f_mntfromname));
strncpy(target_statfs->f_mntonname, host_statfs->f_mntonname,
sizeof(target_statfs->f_mntonname));
unlock_user_struct(target_statfs, target_addr, 1);
return 0;
}
/*
* fcntl cmd conversion
*/
abi_long target_to_host_fcntl_cmd(int cmd)
{
return cmd;
}
bsd-user/freebsd/os-stat.h (new file, 663 lines)
@ -0,0 +1,663 @@
/*
* stat related system call shims and definitions
*
* Copyright (c) 2013 Stacey D. Son
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef BSD_USER_FREEBSD_OS_STAT_H
#define BSD_USER_FREEBSD_OS_STAT_H
int freebsd11_stat(const char *path, struct freebsd11_stat *stat);
__sym_compat(stat, freebsd11_stat, FBSD_1.0);
int freebsd11_lstat(const char *path, struct freebsd11_stat *stat);
__sym_compat(lstat, freebsd11_lstat, FBSD_1.0);
int freebsd11_fstat(int fd, struct freebsd11_stat *stat);
__sym_compat(fstat, freebsd11_fstat, FBSD_1.0);
int freebsd11_fstatat(int fd, const char *path, struct freebsd11_stat *stat,
int flag);
__sym_compat(fstatat, freebsd11_fstatat, FBSD_1.1);
int freebsd11_fhstat(const fhandle_t *fhandle, struct freebsd11_stat *stat);
__sym_compat(fhstat, freebsd11_fhstat, FBSD_1.0);
int freebsd11_getfsstat(struct freebsd11_statfs *buf, long bufsize, int mode);
__sym_compat(getfsstat, freebsd11_getfsstat, FBSD_1.0);
int freebsd11_fhstatfs(const fhandle_t *fhandle, struct freebsd11_statfs * buf);
__sym_compat(fhstatfs, freebsd11_fhstatfs, FBSD_1.0);
int freebsd11_statfs(const char *path, struct freebsd11_statfs *buf);
__sym_compat(statfs, freebsd11_statfs, FBSD_1.0);
int freebsd11_fstatfs(int fd, struct freebsd11_statfs *buf);
__sym_compat(fstatfs, freebsd11_fstatfs, FBSD_1.0);
ssize_t freebsd11_getdirentries(int fd, char *buf, size_t nbytes, off_t *basep);
__sym_compat(getdirentries, freebsd11_getdirentries, FBSD_1.0);
ssize_t freebsd11_getdents(int fd, char *buf, size_t nbytes);
__sym_compat(getdents, freebsd11_getdents, FBSD_1.0);
/* undocumented nstat system calls */
int freebsd11_nstat(const char *path, struct freebsd11_stat *sb);
__sym_compat(nstat, freebsd11_nstat, FBSD_1.0);
int freebsd11_nlstat(const char *path, struct freebsd11_stat *sb);
__sym_compat(nlstat, freebsd11_nlstat, FBSD_1.0);
int freebsd11_nfstat(int fd, struct freebsd11_stat *sb);
__sym_compat(nfstat, freebsd11_nfstat, FBSD_1.0);
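These __sym_compat() lines bind the prototypes to the versioned compatibility symbols that FreeBSD's libc keeps around for pre-ino64 binaries. On FreeBSD the macro comes from <sys/cdefs.h> and emits a .symver directive; roughly:

/* Approximate expansion of __sym_compat(stat, freebsd11_stat, FBSD_1.0) */
__asm__(".symver freebsd11_stat, stat@FBSD_1.0");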
/* stat(2) */
static inline abi_long do_freebsd11_stat(abi_long arg1, abi_long arg2)
{
abi_long ret;
void *p;
struct freebsd11_stat st;
LOCK_PATH(p, arg1);
ret = get_errno(freebsd11_stat(path(p), &st));
UNLOCK_PATH(p, arg1);
if (!is_error(ret)) {
ret = h2t_freebsd11_stat(arg2, &st);
}
return ret;
}
/* lstat(2) */
static inline abi_long do_freebsd11_lstat(abi_long arg1, abi_long arg2)
{
abi_long ret;
void *p;
struct freebsd11_stat st;
LOCK_PATH(p, arg1);
ret = get_errno(freebsd11_lstat(path(p), &st));
UNLOCK_PATH(p, arg1);
if (!is_error(ret)) {
ret = h2t_freebsd11_stat(arg2, &st);
}
return ret;
}
/* fstat(2) */
static inline abi_long do_freebsd11_fstat(abi_long arg1, abi_long arg2)
{
abi_long ret;
struct freebsd11_stat st;
ret = get_errno(freebsd11_fstat(arg1, &st));
if (!is_error(ret)) {
ret = h2t_freebsd11_stat(arg2, &st);
}
return ret;
}
/* fstat(2) */
static inline abi_long do_freebsd_fstat(abi_long arg1, abi_long arg2)
{
abi_long ret;
struct stat st;
ret = get_errno(fstat(arg1, &st));
if (!is_error(ret)) {
ret = h2t_freebsd_stat(arg2, &st);
}
return ret;
}
/* fstatat(2) */
static inline abi_long do_freebsd11_fstatat(abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4)
{
abi_long ret;
void *p;
struct freebsd11_stat st;
LOCK_PATH(p, arg2);
ret = get_errno(freebsd11_fstatat(arg1, p, &st, arg4));
UNLOCK_PATH(p, arg2);
if (!is_error(ret) && arg3) {
ret = h2t_freebsd11_stat(arg3, &st);
}
return ret;
}
/* fstatat(2) */
static inline abi_long do_freebsd_fstatat(abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4)
{
abi_long ret;
void *p;
struct stat st;
LOCK_PATH(p, arg2);
ret = get_errno(fstatat(arg1, p, &st, arg4));
UNLOCK_PATH(p, arg2);
if (!is_error(ret) && arg3) {
ret = h2t_freebsd_stat(arg3, &st);
}
return ret;
}
/* undocumented nstat(char *path, struct nstat *ub) syscall */
static abi_long do_freebsd11_nstat(abi_long arg1, abi_long arg2)
{
abi_long ret;
void *p;
struct freebsd11_stat st;
LOCK_PATH(p, arg1);
ret = get_errno(freebsd11_nstat(path(p), &st));
UNLOCK_PATH(p, arg1);
if (!is_error(ret)) {
ret = h2t_freebsd11_nstat(arg2, &st);
}
return ret;
}
/* undocumented nfstat(int fd, struct nstat *sb) syscall */
static abi_long do_freebsd11_nfstat(abi_long arg1, abi_long arg2)
{
abi_long ret;
struct freebsd11_stat st;
ret = get_errno(freebsd11_nfstat(arg1, &st));
if (!is_error(ret)) {
ret = h2t_freebsd11_nstat(arg2, &st);
}
return ret;
}
/* undocumented nlstat(char *path, struct nstat *ub) syscall */
static abi_long do_freebsd11_nlstat(abi_long arg1, abi_long arg2)
{
abi_long ret;
void *p;
struct freebsd11_stat st;
LOCK_PATH(p, arg1);
ret = get_errno(freebsd11_nlstat(path(p), &st));
UNLOCK_PATH(p, arg1);
if (!is_error(ret)) {
ret = h2t_freebsd11_nstat(arg2, &st);
}
return ret;
}
/* getfh(2) */
static abi_long do_freebsd_getfh(abi_long arg1, abi_long arg2)
{
abi_long ret;
void *p;
fhandle_t host_fh;
LOCK_PATH(p, arg1);
ret = get_errno(getfh(path(p), &host_fh));
UNLOCK_PATH(p, arg1);
if (is_error(ret)) {
return ret;
}
return h2t_freebsd_fhandle(arg2, &host_fh);
}
/* lgetfh(2) */
static inline abi_long do_freebsd_lgetfh(abi_long arg1, abi_long arg2)
{
abi_long ret;
void *p;
fhandle_t host_fh;
LOCK_PATH(p, arg1);
ret = get_errno(lgetfh(path(p), &host_fh));
UNLOCK_PATH(p, arg1);
if (is_error(ret)) {
return ret;
}
return h2t_freebsd_fhandle(arg2, &host_fh);
}
/* fhopen(2) */
static inline abi_long do_freebsd_fhopen(abi_long arg1, abi_long arg2)
{
abi_long ret;
fhandle_t host_fh;
ret = t2h_freebsd_fhandle(&host_fh, arg1);
if (is_error(ret)) {
return ret;
}
return get_errno(fhopen(&host_fh, arg2));
}
/* fhstat(2) */
static inline abi_long do_freebsd11_fhstat(abi_long arg1, abi_long arg2)
{
abi_long ret;
fhandle_t host_fh;
struct freebsd11_stat host_sb;
ret = t2h_freebsd_fhandle(&host_fh, arg1);
if (is_error(ret)) {
return ret;
}
ret = get_errno(freebsd11_fhstat(&host_fh, &host_sb));
if (is_error(ret)) {
return ret;
}
return h2t_freebsd11_stat(arg2, &host_sb);
}
/* fhstat(2) */
static inline abi_long do_freebsd_fhstat(abi_long arg1, abi_long arg2)
{
abi_long ret;
fhandle_t host_fh;
struct stat host_sb;
ret = t2h_freebsd_fhandle(&host_fh, arg1);
if (is_error(ret)) {
return ret;
}
ret = get_errno(fhstat(&host_fh, &host_sb));
if (is_error(ret)) {
return ret;
}
return h2t_freebsd_stat(arg2, &host_sb);
}
/* fhstatfs(2) */
static inline abi_long do_freebsd11_fhstatfs(abi_ulong target_fhp_addr,
abi_ulong target_stfs_addr)
{
abi_long ret;
fhandle_t host_fh;
struct freebsd11_statfs host_stfs;
ret = t2h_freebsd_fhandle(&host_fh, target_fhp_addr);
if (is_error(ret)) {
return ret;
}
ret = get_errno(freebsd11_fhstatfs(&host_fh, &host_stfs));
if (is_error(ret)) {
return ret;
}
return h2t_freebsd11_statfs(target_stfs_addr, &host_stfs);
}
/* fhstatfs(2) */
static inline abi_long do_freebsd_fhstatfs(abi_ulong target_fhp_addr,
abi_ulong target_stfs_addr)
{
abi_long ret;
fhandle_t host_fh;
struct statfs host_stfs;
ret = t2h_freebsd_fhandle(&host_fh, target_fhp_addr);
if (is_error(ret)) {
return ret;
}
ret = get_errno(fhstatfs(&host_fh, &host_stfs));
if (is_error(ret)) {
return ret;
}
return h2t_freebsd_statfs(target_stfs_addr, &host_stfs);
}
/* statfs(2) */
static inline abi_long do_freebsd11_statfs(abi_long arg1, abi_long arg2)
{
abi_long ret;
void *p;
struct freebsd11_statfs host_stfs;
LOCK_PATH(p, arg1);
ret = get_errno(freebsd11_statfs(path(p), &host_stfs));
UNLOCK_PATH(p, arg1);
if (is_error(ret)) {
return ret;
}
return h2t_freebsd11_statfs(arg2, &host_stfs);
}
/* statfs(2) */
static inline abi_long do_freebsd_statfs(abi_long arg1, abi_long arg2)
{
abi_long ret;
void *p;
struct statfs host_stfs;
LOCK_PATH(p, arg1);
ret = get_errno(statfs(path(p), &host_stfs));
UNLOCK_PATH(p, arg1);
if (is_error(ret)) {
return ret;
}
return h2t_freebsd_statfs(arg2, &host_stfs);
}
/* fstatfs(2) */
static inline abi_long do_freebsd11_fstatfs(abi_long fd, abi_ulong target_addr)
{
abi_long ret;
struct freebsd11_statfs host_stfs;
ret = get_errno(freebsd11_fstatfs(fd, &host_stfs));
if (is_error(ret)) {
return ret;
}
return h2t_freebsd11_statfs(target_addr, &host_stfs);
}
/* fstatfs(2) */
static inline abi_long do_freebsd_fstatfs(abi_long fd, abi_ulong target_addr)
{
abi_long ret;
struct statfs host_stfs;
ret = get_errno(fstatfs(fd, &host_stfs));
if (is_error(ret)) {
return ret;
}
return h2t_freebsd_statfs(target_addr, &host_stfs);
}
/* getfsstat(2) */
static inline abi_long do_freebsd11_getfsstat(abi_ulong target_addr,
abi_long bufsize, abi_long flags)
{
abi_long ret;
struct freebsd11_statfs *host_stfs;
int count;
long host_bufsize;
count = bufsize / sizeof(struct target_freebsd11_statfs);
/* if user buffer is NULL then return number of mounted FS's */
if (target_addr == 0 || count == 0) {
return get_errno(freebsd11_getfsstat(NULL, 0, flags));
}
/* XXX check count to be reasonable */
host_bufsize = sizeof(struct freebsd11_statfs) * count;
host_stfs = alloca(host_bufsize);
if (!host_stfs) {
return -TARGET_EINVAL;
}
ret = count = get_errno(freebsd11_getfsstat(host_stfs, host_bufsize, flags));
if (is_error(ret)) {
return ret;
}
while (count--) {
if (h2t_freebsd11_statfs((target_addr +
(count * sizeof(struct target_freebsd11_statfs))),
&host_stfs[count])) {
return -TARGET_EFAULT;
}
}
return ret;
}
/* getfsstat(2) */
static inline abi_long do_freebsd_getfsstat(abi_ulong target_addr,
abi_long bufsize, abi_long flags)
{
abi_long ret;
struct statfs *host_stfs;
int count;
long host_bufsize;
count = bufsize / sizeof(struct target_statfs);
/* if user buffer is NULL then return number of mounted FS's */
if (target_addr == 0 || count == 0) {
return get_errno(getfsstat(NULL, 0, flags));
}
/* XXX check count to be reasonable */
host_bufsize = sizeof(struct statfs) * count;
host_stfs = alloca(host_bufsize);
if (!host_stfs) {
return -TARGET_EINVAL;
}
ret = count = get_errno(getfsstat(host_stfs, host_bufsize, flags));
if (is_error(ret)) {
return ret;
}
while (count--) {
if (h2t_freebsd_statfs((target_addr +
(count * sizeof(struct target_statfs))),
&host_stfs[count])) {
return -TARGET_EFAULT;
}
}
return ret;
}
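/*
 * A minimal host-side sketch, assuming a FreeBSD host: the two-call
 * getfsstat(2) pattern that the emulation above mirrors. A NULL buffer
 * returns the number of mounted filesystems, which sizes the real call.
 */
#include <sys/param.h>
#include <sys/mount.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    struct statfs *buf;
    int i, n;

    n = getfsstat(NULL, 0, MNT_NOWAIT);       /* count mounted filesystems */
    if (n < 0) {
        perror("getfsstat");
        return 1;
    }
    buf = calloc(n, sizeof(*buf));
    n = getfsstat(buf, n * (long)sizeof(*buf), MNT_NOWAIT);
    for (i = 0; i < n; i++) {
        printf("%s on %s (%s)\n", buf[i].f_mntfromname,
               buf[i].f_mntonname, buf[i].f_fstypename);
    }
    free(buf);
    return 0;
}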
/* getdents(2) */
static inline abi_long do_freebsd11_getdents(abi_long arg1,
abi_ulong arg2, abi_long nbytes)
{
abi_long ret;
struct freebsd11_dirent *dirp;
dirp = lock_user(VERIFY_WRITE, arg2, nbytes, 0);
if (dirp == NULL) {
return -TARGET_EFAULT;
}
ret = get_errno(freebsd11_getdents(arg1, (char *)dirp, nbytes));
if (!is_error(ret)) {
struct freebsd11_dirent *de;
int len = ret;
int reclen;
de = dirp;
while (len > 0) {
reclen = de->d_reclen;
if (reclen > len) {
return -TARGET_EFAULT;
}
de->d_reclen = tswap16(reclen);
de->d_fileno = tswap32(de->d_fileno);
len -= reclen;
/* advance to the next variable-length record */
de = (struct freebsd11_dirent *)((void *)de + reclen);
}
}
unlock_user(dirp, arg2, ret);
return ret;
}
/* getdirentries(2) */
static inline abi_long do_freebsd11_getdirentries(abi_long arg1,
abi_ulong arg2, abi_long nbytes, abi_ulong arg4)
{
abi_long ret;
struct freebsd11_dirent *dirp;
long basep;
dirp = lock_user(VERIFY_WRITE, arg2, nbytes, 0);
if (dirp == NULL) {
return -TARGET_EFAULT;
}
ret = get_errno(freebsd11_getdirentries(arg1, (char *)dirp, nbytes, &basep));
if (!is_error(ret)) {
struct freebsd11_dirent *de;
int len = ret;
int reclen;
de = dirp;
while (len > 0) {
reclen = de->d_reclen;
if (reclen > len) {
return -TARGET_EFAULT;
}
de->d_reclen = tswap16(reclen);
de->d_fileno = tswap32(de->d_fileno);
len -= reclen;
de = (struct freebsd11_dirent *)((void *)de + reclen);
}
}
unlock_user(dirp, arg2, ret);
if (arg4) {
if (put_user(basep, arg4, abi_ulong)) {
return -TARGET_EFAULT;
}
}
return ret;
}
/* getdirentries(2) */
static inline abi_long do_freebsd_getdirentries(abi_long arg1,
abi_ulong arg2, abi_long nbytes, abi_ulong arg4)
{
abi_long ret;
struct dirent *dirp;
long basep;
dirp = lock_user(VERIFY_WRITE, arg2, nbytes, 0);
if (dirp == NULL) {
return -TARGET_EFAULT;
}
ret = get_errno(getdirentries(arg1, (char *)dirp, nbytes, &basep));
if (!is_error(ret)) {
struct dirent *de;
int len = ret;
int reclen;
de = dirp;
while (len > 0) {
reclen = de->d_reclen;
if (reclen > len) {
return -TARGET_EFAULT;
}
de->d_fileno = tswap64(de->d_fileno);
de->d_off = tswap64(de->d_off);
de->d_reclen = tswap16(de->d_reclen);
de->d_namlen = tswap16(de->d_namlen);
len -= reclen;
de = (struct dirent *)((void *)de + reclen);
}
}
unlock_user(dirp, arg2, ret);
if (arg4) {
if (put_user(basep, arg4, abi_ulong)) {
return -TARGET_EFAULT;
}
}
return ret;
}
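/*
 * A minimal host-side sketch, assuming a FreeBSD host: getdirentries(2)
 * fills a buffer with variable-length records chained by d_reclen, which
 * is exactly the walk the two handlers above perform while byte-swapping.
 */
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    _Alignas(8) char buf[4096];
    off_t basep;
    ssize_t n, off;
    int fd = open(".", O_RDONLY | O_DIRECTORY);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    n = getdirentries(fd, buf, sizeof(buf), &basep);
    if (n < 0) {
        perror("getdirentries");
        return 1;
    }
    for (off = 0; off < n; ) {
        struct dirent *de = (struct dirent *)(buf + off);
        printf("%s\n", de->d_name);
        off += de->d_reclen;          /* advance by the record's own length */
    }
    close(fd);
    return 0;
}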
/* fcntl(2) */
static inline abi_long do_freebsd_fcntl(abi_long arg1, abi_long arg2,
abi_ulong arg3)
{
abi_long ret;
int host_cmd;
struct flock fl;
struct target_freebsd_flock *target_fl;
host_cmd = target_to_host_fcntl_cmd(arg2);
if (host_cmd < 0) {
return host_cmd;
}
switch (arg2) {
case TARGET_F_GETLK:
if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) {
return -TARGET_EFAULT;
}
__get_user(fl.l_type, &target_fl->l_type);
__get_user(fl.l_whence, &target_fl->l_whence);
__get_user(fl.l_start, &target_fl->l_start);
__get_user(fl.l_len, &target_fl->l_len);
__get_user(fl.l_pid, &target_fl->l_pid);
__get_user(fl.l_sysid, &target_fl->l_sysid);
unlock_user_struct(target_fl, arg3, 0);
ret = get_errno(safe_fcntl(arg1, host_cmd, &fl));
if (!is_error(ret)) {
if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) {
return -TARGET_EFAULT;
}
__put_user(fl.l_type, &target_fl->l_type);
__put_user(fl.l_whence, &target_fl->l_whence);
__put_user(fl.l_start, &target_fl->l_start);
__put_user(fl.l_len, &target_fl->l_len);
__put_user(fl.l_pid, &target_fl->l_pid);
__put_user(fl.l_sysid, &target_fl->l_sysid);
unlock_user_struct(target_fl, arg3, 1);
}
break;
case TARGET_F_SETLK:
case TARGET_F_SETLKW:
if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) {
return -TARGET_EFAULT;
}
__get_user(fl.l_type, &target_fl->l_type);
__get_user(fl.l_whence, &target_fl->l_whence);
__get_user(fl.l_start, &target_fl->l_start);
__get_user(fl.l_len, &target_fl->l_len);
__get_user(fl.l_pid, &target_fl->l_pid);
__get_user(fl.l_sysid, &target_fl->l_sysid);
unlock_user_struct(target_fl, arg3, 0);
ret = get_errno(safe_fcntl(arg1, host_cmd, &fl));
break;
case TARGET_F_DUPFD:
case TARGET_F_DUP2FD:
case TARGET_F_GETOWN:
case TARGET_F_SETOWN:
case TARGET_F_GETFD:
case TARGET_F_SETFD:
case TARGET_F_GETFL:
case TARGET_F_SETFL:
case TARGET_F_READAHEAD:
case TARGET_F_RDAHEAD:
case TARGET_F_ADD_SEALS:
case TARGET_F_GET_SEALS:
default:
ret = get_errno(safe_fcntl(arg1, host_cmd, arg3));
break;
}
return ret;
}
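/*
 * A minimal host-side sketch, assuming any POSIX system: the struct flock
 * round trip that the F_GETLK/F_SETLK arms above translate field by field
 * between guest and host layouts. The file name is only an example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    struct flock fl;
    int fd = open("lockfile.tmp", O_RDWR | O_CREAT, 0600);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    memset(&fl, 0, sizeof(fl));
    fl.l_type = F_WRLCK;              /* exclusive lock */
    fl.l_whence = SEEK_SET;
    fl.l_start = 0;
    fl.l_len = 0;                     /* 0 = to end of file */
    if (fcntl(fd, F_SETLK, &fl) == -1) {
        perror("fcntl(F_SETLK)");
        return 1;
    }
    printf("byte-range lock acquired\n");
    close(fd);
    return 0;
}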
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1300080
extern int __realpathat(int fd, const char *path, char *buf, size_t size,
int flags);
/* https://svnweb.freebsd.org/base?view=revision&revision=358172 */
/* no man page */
static inline abi_long do_freebsd_realpathat(abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
{
abi_long ret;
void *p, *b;
LOCK_PATH(p, arg2);
b = lock_user(VERIFY_WRITE, arg3, arg4, 0);
if (b == NULL) {
UNLOCK_PATH(p, arg2);
return -TARGET_EFAULT;
}
ret = get_errno(__realpathat(arg1, p, b, arg4, arg5));
UNLOCK_PATH(p, arg2);
unlock_user(b, arg3, ret);
return ret;
}
#endif
#endif /* BSD_USER_FREEBSD_OS_STAT_H */

@ -17,17 +17,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
* We need the FreeBSD "legacy" definitions. Rust needs the FreeBSD 11 system
* calls since it doesn't use libc at all, so we have to emulate that despite
* FreeBSD 11 being EOL'd.
*/
#define _WANT_FREEBSD11_STAT
#define _WANT_FREEBSD11_STATFS
#define _WANT_FREEBSD11_DIRENT
#define _WANT_KERNEL_ERRNO
#define _WANT_SEMUN
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
@ -47,6 +36,9 @@
#include "bsd-file.h"
#include "bsd-proc.h"
/* *BSD dependent syscall shims */
#include "os-stat.h"
/* I/O */
safe_syscall3(int, open, const char *, path, int, flags, mode_t, mode);
safe_syscall4(int, openat, int, fd, const char *, path, int, flags, mode_t,
@ -248,6 +240,7 @@ static abi_long freebsd_syscall(void *cpu_env, int num, abi_long arg1,
case TARGET_FREEBSD_NR_preadv: /* preadv(2) */
ret = do_bsd_preadv(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
break;
case TARGET_FREEBSD_NR_write: /* write(2) */
ret = do_bsd_write(arg1, arg2, arg3);
@ -493,6 +486,113 @@ static abi_long freebsd_syscall(void *cpu_env, int num, abi_long arg1,
ret = do_bsd_undelete(arg1);
break;
/*
* stat system calls
*/
case TARGET_FREEBSD_NR_freebsd11_stat: /* stat(2) */
ret = do_freebsd11_stat(arg1, arg2);
break;
case TARGET_FREEBSD_NR_freebsd11_lstat: /* lstat(2) */
ret = do_freebsd11_lstat(arg1, arg2);
break;
case TARGET_FREEBSD_NR_freebsd11_fstat: /* fstat(2) */
ret = do_freebsd11_fstat(arg1, arg2);
break;
case TARGET_FREEBSD_NR_fstat: /* fstat(2) */
ret = do_freebsd_fstat(arg1, arg2);
break;
case TARGET_FREEBSD_NR_freebsd11_fstatat: /* fstatat(2) */
ret = do_freebsd11_fstatat(arg1, arg2, arg3, arg4);
break;
case TARGET_FREEBSD_NR_fstatat: /* fstatat(2) */
ret = do_freebsd_fstatat(arg1, arg2, arg3, arg4);
break;
case TARGET_FREEBSD_NR_freebsd11_nstat: /* undocumented */
ret = do_freebsd11_nstat(arg1, arg2);
break;
case TARGET_FREEBSD_NR_freebsd11_nfstat: /* undocumented */
ret = do_freebsd11_nfstat(arg1, arg2);
break;
case TARGET_FREEBSD_NR_freebsd11_nlstat: /* undocumented */
ret = do_freebsd11_nlstat(arg1, arg2);
break;
case TARGET_FREEBSD_NR_getfh: /* getfh(2) */
ret = do_freebsd_getfh(arg1, arg2);
break;
case TARGET_FREEBSD_NR_lgetfh: /* lgetfh(2) */
ret = do_freebsd_lgetfh(arg1, arg2);
break;
case TARGET_FREEBSD_NR_fhopen: /* fhopen(2) */
ret = do_freebsd_fhopen(arg1, arg2);
break;
case TARGET_FREEBSD_NR_freebsd11_fhstat: /* fhstat(2) */
ret = do_freebsd11_fhstat(arg1, arg2);
break;
case TARGET_FREEBSD_NR_fhstat: /* fhstat(2) */
ret = do_freebsd_fhstat(arg1, arg2);
break;
case TARGET_FREEBSD_NR_freebsd11_fhstatfs: /* fhstatfs(2) */
ret = do_freebsd11_fhstatfs(arg1, arg2);
break;
case TARGET_FREEBSD_NR_fhstatfs: /* fhstatfs(2) */
ret = do_freebsd_fhstatfs(arg1, arg2);
break;
case TARGET_FREEBSD_NR_freebsd11_statfs: /* statfs(2) */
ret = do_freebsd11_statfs(arg1, arg2);
break;
case TARGET_FREEBSD_NR_statfs: /* statfs(2) */
ret = do_freebsd_statfs(arg1, arg2);
break;
case TARGET_FREEBSD_NR_freebsd11_fstatfs: /* fstatfs(2) */
ret = do_freebsd11_fstatfs(arg1, arg2);
break;
case TARGET_FREEBSD_NR_fstatfs: /* fstatfs(2) */
ret = do_freebsd_fstatfs(arg1, arg2);
break;
case TARGET_FREEBSD_NR_freebsd11_getfsstat: /* getfsstat(2) */
ret = do_freebsd11_getfsstat(arg1, arg2, arg3);
break;
case TARGET_FREEBSD_NR_getfsstat: /* getfsstat(2) */
ret = do_freebsd_getfsstat(arg1, arg2, arg3);
break;
case TARGET_FREEBSD_NR_freebsd11_getdents: /* getdents(2) */
ret = do_freebsd11_getdents(arg1, arg2, arg3);
break;
case TARGET_FREEBSD_NR_getdirentries: /* getdirentries(2) */
ret = do_freebsd_getdirentries(arg1, arg2, arg3, arg4);
break;
case TARGET_FREEBSD_NR_freebsd11_getdirentries: /* getdirentries(2) */
ret = do_freebsd11_getdirentries(arg1, arg2, arg3, arg4);
break;
case TARGET_FREEBSD_NR_fcntl: /* fcntl(2) */
ret = do_freebsd_fcntl(arg1, arg2, arg3);
break;
/*
* sys{ctl, arch, call}
*/

@ -0,0 +1,50 @@
/*
* FreeBSD conversion extern declarations
*
* Copyright (c) 2013 Stacey D. Son
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef QEMU_OS_H
#define QEMU_OS_H
/* qemu/osdep.h pulls in the rest */
#include <sys/acl.h>
#include <sys/mount.h>
#include <sys/timex.h>
#include <sys/rtprio.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <netinet/in.h>
struct freebsd11_stat;
/* os-stat.c */
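/* h2t_* convert host structures to target (guest) layout; t2h_* the reverse */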
abi_long h2t_freebsd11_stat(abi_ulong target_addr,
struct freebsd11_stat *host_st);
abi_long h2t_freebsd11_nstat(abi_ulong target_addr,
struct freebsd11_stat *host_st);
abi_long t2h_freebsd_fhandle(fhandle_t *host_fh, abi_ulong target_addr);
abi_long h2t_freebsd_fhandle(abi_ulong target_addr, fhandle_t *host_fh);
abi_long h2t_freebsd11_statfs(abi_ulong target_addr,
struct freebsd11_statfs *host_statfs);
abi_long target_to_host_fcntl_cmd(int cmd);
abi_long h2t_freebsd_stat(abi_ulong target_addr,
struct stat *host_st);
abi_long h2t_freebsd_statfs(abi_ulong target_addr,
struct statfs *host_statfs);
#endif /* QEMU_OS_H */

@ -164,6 +164,10 @@ static inline void target_cpu_loop(CPUX86State *env)
}
break;
case EXCP_SYSCALL:
/* doesn't do anything */
break;
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
break;

@ -20,7 +20,6 @@
#ifndef TARGET_ARCH_ELF_H
#define TARGET_ARCH_ELF_H
#define ELF_START_MMAP 0x80000000
#define ELF_ET_DYN_LOAD_ADDR 0x01001000
#define elf_check_arch(x) (((x) == EM_386) || ((x) == EM_486))

@ -473,10 +473,6 @@ int main(int argc, char **argv)
target_environ = envlist_to_environ(envlist, NULL);
envlist_free(envlist);
if (reserved_va) {
mmap_next_start = reserved_va + 1;
}
{
Error *err = NULL;
if (seed_optarg != NULL) {
@ -494,7 +490,49 @@ int main(int argc, char **argv)
* Now that page sizes are configured we can do
* proper page alignment for guest_base.
*/
guest_base = HOST_PAGE_ALIGN(guest_base);
if (have_guest_base) {
if (guest_base & ~qemu_host_page_mask) {
error_report("Selected guest base not host page aligned");
exit(1);
}
}
/*
* If reserving host virtual address space, do so now.
* Combined with '-B', ensure that the chosen range is free.
*/
if (reserved_va) {
void *p;
if (have_guest_base) {
p = mmap((void *)guest_base, reserved_va + 1, PROT_NONE,
MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_EXCL, -1, 0);
} else {
p = mmap(NULL, reserved_va + 1, PROT_NONE,
MAP_ANON | MAP_PRIVATE, -1, 0);
}
if (p == MAP_FAILED) {
const char *err = strerror(errno);
char *sz = size_to_str(reserved_va + 1);
if (have_guest_base) {
error_report("Cannot allocate %s bytes at -B %p for guest "
"address space: %s", sz, (void *)guest_base, err);
} else {
error_report("Cannot allocate %s bytes for guest "
"address space: %s", sz, err);
}
exit(1);
}
guest_base = (uintptr_t)p;
have_guest_base = true;
/* Ensure that mmap_next_start is within range. */
if (reserved_va <= mmap_next_start) {
mmap_next_start = (reserved_va / 4 * 3)
& TARGET_PAGE_MASK & qemu_host_page_mask;
}
}
if (loader_exec(filename, argv + optind, target_environ, regs, info,
&bprm) != 0) {
@ -515,8 +553,6 @@ int main(int argc, char **argv)
fprintf(f, "page layout changed following binary load\n");
page_dump(f);
fprintf(f, "start_brk 0x" TARGET_ABI_FMT_lx "\n",
info->start_brk);
fprintf(f, "end_code 0x" TARGET_ABI_FMT_lx "\n",
info->end_code);
fprintf(f, "start_code 0x" TARGET_ABI_FMT_lx "\n",

@ -32,6 +32,7 @@ void mmap_lock(void)
void mmap_unlock(void)
{
assert(mmap_lock_count > 0);
if (--mmap_lock_count == 0) {
pthread_mutex_unlock(&mmap_mutex);
}
@ -213,8 +214,6 @@ static int mmap_frag(abi_ulong real_start,
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;
unsigned long last_brk;
/*
* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk of guest
* address space.
@ -222,50 +221,16 @@ unsigned long last_brk;
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
abi_ulong alignment)
{
abi_ulong addr;
abi_ulong end_addr;
int prot;
int looped = 0;
abi_ulong ret;
if (size > reserved_va) {
return (abi_ulong)-1;
ret = page_find_range_empty(start, reserved_va, size, alignment);
if (ret == -1 && start > TARGET_PAGE_SIZE) {
/* Restart at the beginning of the address space. */
ret = page_find_range_empty(TARGET_PAGE_SIZE, start - 1,
size, alignment);
}
size = HOST_PAGE_ALIGN(size) + alignment;
end_addr = start + size;
if (end_addr > reserved_va) {
end_addr = reserved_va + 1;
}
addr = end_addr - qemu_host_page_size;
while (1) {
if (addr > end_addr) {
if (looped) {
return (abi_ulong)-1;
}
end_addr = reserved_va + 1;
addr = end_addr - qemu_host_page_size;
looped = 1;
continue;
}
prot = page_get_flags(addr);
if (prot) {
end_addr = addr;
}
if (end_addr - addr >= size) {
break;
}
addr -= qemu_host_page_size;
}
if (start == mmap_next_start) {
mmap_next_start = addr;
}
/* addr is sufficiently low to align it up */
if (alignment != 0) {
addr = (addr + alignment) & ~(alignment - 1);
}
return addr;
return ret;
}
/*
@ -293,7 +258,8 @@ static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
if (reserved_va) {
return mmap_find_vma_reserved(start, size,
(alignment != 0 ? 1 << alignment : 0));
(alignment != 0 ? 1 << alignment :
MAX(qemu_host_page_size, TARGET_PAGE_SIZE)));
}
addr = start;
@ -609,7 +575,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
}
/* Reject the mapping if any page within the range is mapped */
if ((flags & MAP_EXCL) && page_check_range(start, len, 0) < 0) {
if ((flags & MAP_EXCL) && !page_check_range_empty(start, end - 1)) {
errno = EINVAL;
goto fail;
}

@ -38,6 +38,7 @@ extern char **environ;
#include "exec/gdbstub.h"
#include "qemu/clang-tsa.h"
#include "qemu-os.h"
/*
* This struct is used to hold certain information about the image. Basically,
* it replicates in user space what would be certain task_struct fields in the
@ -50,10 +51,7 @@ struct image_info {
abi_ulong end_code;
abi_ulong start_data;
abi_ulong end_data;
abi_ulong start_brk;
abi_ulong brk;
abi_ulong start_mmap;
abi_ulong mmap;
abi_ulong rss;
abi_ulong start_stack;
abi_ulong entry;
@ -232,7 +230,6 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
abi_ulong new_size, unsigned long flags,
abi_ulong new_addr);
int target_msync(abi_ulong start, abi_ulong len, int flags);
extern unsigned long last_brk;
extern abi_ulong mmap_next_start;
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size);
void TSA_NO_TSA mmap_fork_start(void);
@ -267,7 +264,7 @@ abi_long do_freebsd_sysarch(void *cpu_env, abi_long arg1, abi_long arg2);
static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
{
return page_check_range((target_ulong)addr, size, type) == 0;
return page_check_range((target_ulong)addr, size, type);
}
/*
@ -276,50 +273,64 @@ static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
* These are usually used to access struct data members once the struct has been
* locked - usually with lock_user_struct().
*/
#define __put_user(x, hptr)\
({\
int size = sizeof(*hptr);\
switch (size) {\
case 1:\
*(uint8_t *)(hptr) = (uint8_t)(typeof(*hptr))(x);\
break;\
case 2:\
*(uint16_t *)(hptr) = tswap16((typeof(*hptr))(x));\
break;\
case 4:\
*(uint32_t *)(hptr) = tswap32((typeof(*hptr))(x));\
break;\
case 8:\
*(uint64_t *)(hptr) = tswap64((typeof(*hptr))(x));\
break;\
default:\
abort();\
} \
0;\
})
#define __get_user(x, hptr) \
({\
int size = sizeof(*hptr);\
switch (size) {\
case 1:\
x = (typeof(*hptr))*(uint8_t *)(hptr);\
break;\
case 2:\
x = (typeof(*hptr))tswap16(*(uint16_t *)(hptr));\
break;\
case 4:\
x = (typeof(*hptr))tswap32(*(uint32_t *)(hptr));\
break;\
case 8:\
x = (typeof(*hptr))tswap64(*(uint64_t *)(hptr));\
break;\
default:\
x = 0;\
abort();\
} \
0;\
})
/*
* Tricky points:
* - Use __builtin_choose_expr to avoid type promotion from ?:,
* - Invalid sizes result in a compile time error stemming from
* the fact that abort has no parameters.
* - It's easier to use the endian-specific unaligned load/store
* functions than host-endian unaligned load/store plus tswapN.
* - The pragmas are necessary only to silence a clang false-positive
* warning: see https://bugs.llvm.org/show_bug.cgi?id=39113 .
* - gcc has bugs in its _Pragma() support in some versions, eg
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83256 -- so we only
* include the warning-suppression pragmas for clang
*/
#if defined(__clang__) && __has_warning("-Waddress-of-packed-member")
#define PRAGMA_DISABLE_PACKED_WARNING \
_Pragma("GCC diagnostic push"); \
_Pragma("GCC diagnostic ignored \"-Waddress-of-packed-member\"")
#define PRAGMA_REENABLE_PACKED_WARNING \
_Pragma("GCC diagnostic pop")
#else
#define PRAGMA_DISABLE_PACKED_WARNING
#define PRAGMA_REENABLE_PACKED_WARNING
#endif
#define __put_user_e(x, hptr, e) \
do { \
PRAGMA_DISABLE_PACKED_WARNING; \
(__builtin_choose_expr(sizeof(*(hptr)) == 1, stb_p, \
__builtin_choose_expr(sizeof(*(hptr)) == 2, stw_##e##_p, \
__builtin_choose_expr(sizeof(*(hptr)) == 4, stl_##e##_p, \
__builtin_choose_expr(sizeof(*(hptr)) == 8, stq_##e##_p, abort)))) \
((hptr), (x)), (void)0); \
PRAGMA_REENABLE_PACKED_WARNING; \
} while (0)
#define __get_user_e(x, hptr, e) \
do { \
PRAGMA_DISABLE_PACKED_WARNING; \
((x) = (typeof(*hptr))( \
__builtin_choose_expr(sizeof(*(hptr)) == 1, ldub_p, \
__builtin_choose_expr(sizeof(*(hptr)) == 2, lduw_##e##_p, \
__builtin_choose_expr(sizeof(*(hptr)) == 4, ldl_##e##_p, \
__builtin_choose_expr(sizeof(*(hptr)) == 8, ldq_##e##_p, abort)))) \
(hptr)), (void)0); \
PRAGMA_REENABLE_PACKED_WARNING; \
} while (0)
#if TARGET_BIG_ENDIAN
# define __put_user(x, hptr) __put_user_e(x, hptr, be)
# define __get_user(x, hptr) __get_user_e(x, hptr, be)
#else
# define __put_user(x, hptr) __put_user_e(x, hptr, le)
# define __get_user(x, hptr) __get_user_e(x, hptr, le)
#endif
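/*
 * A minimal usage sketch with a hypothetical target struct: once
 * lock_user_struct() has mapped the guest memory, __get_user()/__put_user()
 * move scalar fields across while swapping to and from guest byte order,
 * just as the fcntl(2) handler does for struct target_freebsd_flock.
 */
struct target_pair {
    int32_t a;
    int64_t b;
};

static inline abi_long copy_pair_from_guest(abi_ulong target_addr,
        int32_t *a, int64_t *b)
{
    struct target_pair *tp;

    if (!lock_user_struct(VERIFY_READ, tp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(*a, &tp->a);    /* swaps from guest to host byte order */
    __get_user(*b, &tp->b);
    unlock_user_struct(tp, target_addr, 0);
    return 0;
}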
/*
* put_user()/get_user() take a guest address and check access
@ -332,10 +343,10 @@ static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
({ \
abi_ulong __gaddr = (gaddr); \
target_type *__hptr; \
abi_long __ret; \
abi_long __ret = 0; \
__hptr = lock_user(VERIFY_WRITE, __gaddr, sizeof(target_type), 0); \
if (__hptr) { \
__ret = __put_user((x), __hptr); \
__put_user((x), __hptr); \
unlock_user(__hptr, __gaddr, sizeof(target_type)); \
} else \
__ret = -TARGET_EFAULT; \
@ -346,10 +357,10 @@ static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
({ \
abi_ulong __gaddr = (gaddr); \
target_type *__hptr; \
abi_long __ret; \
abi_long __ret = 0; \
__hptr = lock_user(VERIFY_READ, __gaddr, sizeof(target_type), 1); \
if (__hptr) { \
__ret = __get_user((x), __hptr); \
__get_user((x), __hptr); \
unlock_user(__hptr, __gaddr, 0); \
} else { \
(x) = 0; \

@ -787,10 +787,7 @@ static int reset_signal_mask(target_ucontext_t *ucontext)
TaskState *ts = (TaskState *)thread_cpu->opaque;
for (i = 0; i < TARGET_NSIG_WORDS; i++) {
if (__get_user(target_set.__bits[i],
&ucontext->uc_sigmask.__bits[i])) {
return -TARGET_EFAULT;
}
__get_user(target_set.__bits[i], &ucontext->uc_sigmask.__bits[i]);
}
target_to_host_sigset_internal(&blocked, &target_set);
ts->signal_mask = blocked;

@ -45,9 +45,9 @@
*
*/
#if (!defined(TARGET_I386))
typedef int64_t target_freebsd_time_t;
typedef int64_t target_time_t;
#else
typedef int32_t target_freebsd_time_t;
typedef int32_t target_time_t;
#endif
struct target_iovec {
@ -102,7 +102,7 @@ typedef abi_long target_freebsd_suseconds_t;
/* compare to sys/timespec.h */
struct target_freebsd_timespec {
target_freebsd_time_t tv_sec; /* seconds */
target_time_t tv_sec; /* seconds */
abi_long tv_nsec; /* and nanoseconds */
#if !defined(TARGET_I386) && TARGET_ABI_BITS == 32
abi_long _pad;
@ -120,7 +120,7 @@ struct target_freebsd__umtx_time {
};
struct target_freebsd_timeval {
target_freebsd_time_t tv_sec; /* seconds */
target_time_t tv_sec; /* seconds */
target_freebsd_suseconds_t tv_usec;/* and microseconds */
#if !defined(TARGET_I386) && TARGET_ABI_BITS == 32
abi_long _pad;
@ -179,6 +179,217 @@ struct target_freebsd__wrusage {
struct target_freebsd_rusage wru_children;
};
/*
* sys/stat.h
*/
struct target_freebsd11_stat {
uint32_t st_dev; /* inode's device */
uint32_t st_ino; /* inode's number */
int16_t st_mode; /* inode protection mode */
int16_t st_nlink; /* number of hard links */
uint32_t st_uid; /* user ID of the file's owner */
uint32_t st_gid; /* group ID of the file's group */
uint32_t st_rdev; /* device type */
struct target_freebsd_timespec st_atim; /* time last accessed */
struct target_freebsd_timespec st_mtim; /* time last data modification */
struct target_freebsd_timespec st_ctim; /* time last file status change */
int64_t st_size; /* file size, in bytes */
int64_t st_blocks; /* blocks allocated for file */
uint32_t st_blksize; /* optimal blocksize for I/O */
uint32_t st_flags; /* user defined flags for file */
uint32_t st_gen; /* file generation number */
int32_t st_lspare;
struct target_freebsd_timespec st_birthtim; /* time of file creation */
/*
* Explicitly pad st_birthtim to 16 bytes so that the size of
* struct stat is backwards compatible. We use bitfields instead
* of an array of chars so that this doesn't require a C99 compiler
* to compile if the size of the padding is 0. We use 2 bitfields
* to cover up to 64 bits on 32-bit machines. We assume that
* CHAR_BIT is 8...
*/
unsigned int:(8 / 2) * (16 - (int)sizeof(struct target_freebsd_timespec));
unsigned int:(8 / 2) * (16 - (int)sizeof(struct target_freebsd_timespec));
} __packed;
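/*
 * A compile-checkable sketch of the padding comment above, assuming a
 * 32-bit layout where the timespec stand-in is 8 bytes: each unnamed
 * bitfield then contributes (8 / 2) * (16 - 8) = 32 bits, and the two
 * together pad st_birthtim out to 16 bytes.
 */
#include <stdint.h>

struct ts32 { int32_t tv_sec; int32_t tv_nsec; };  /* 8-byte stand-in */

struct birthtim_pad {
    struct ts32 st_birthtim;
    unsigned int:(8 / 2) * (16 - (int)sizeof(struct ts32));
    unsigned int:(8 / 2) * (16 - (int)sizeof(struct ts32));
};

_Static_assert(sizeof(struct birthtim_pad) == 16,
               "st_birthtim region padded to 16 bytes");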
#if defined(__i386__)
#define TARGET_HAS_STAT_TIME_T_EXT 1
#endif
struct target_stat {
uint64_t st_dev; /* inode's device */
uint64_t st_ino; /* inode's number */
uint64_t st_nlink; /* number of hard links */
int16_t st_mode; /* inode protection mode */
int16_t st_padding0;
uint32_t st_uid; /* user ID of the file's owner */
uint32_t st_gid; /* group ID of the file's group */
int32_t st_padding1;
uint64_t st_rdev; /* device type */
#ifdef TARGET_HAS_STAT_TIME_T_EXT
int32_t st_atim_ext;
#endif
struct target_freebsd_timespec st_atim; /* time of last access */
#ifdef TARGET_HAS_STAT_TIME_T_EXT
int32_t st_mtim_ext;
#endif
struct target_freebsd_timespec st_mtim; /* time of last data modification */
#ifdef TARGET_HAS_STAT_TIME_T_EXT
int32_t st_ctim_ext;
#endif
struct target_freebsd_timespec st_ctim;/* time of last file status change */
#ifdef TARGET_HAS_STAT_TIME_T_EXT
int32_t st_btim_ext;
#endif
struct target_freebsd_timespec st_birthtim; /* time of file creation */
int64_t st_size; /* file size, in bytes */
int64_t st_blocks; /* blocks allocated for file */
uint32_t st_blksize; /* optimal blocksize for I/O */
uint32_t st_flags; /* user defined flags for file */
uint64_t st_gen; /* file generation number */
uint64_t st_spare[10];
};
/* struct nstat is the same as stat above but without the st_lspare field */
struct target_freebsd11_nstat {
uint32_t st_dev; /* inode's device */
uint32_t st_ino; /* inode's number */
int16_t st_mode; /* inode protection mode */
int16_t st_nlink; /* number of hard links */
uint32_t st_uid; /* user ID of the file's owner */
uint32_t st_gid; /* group ID of the file's group */
uint32_t st_rdev; /* device type */
struct target_freebsd_timespec st_atim; /* time last accessed */
struct target_freebsd_timespec st_mtim; /* time last data modification */
struct target_freebsd_timespec st_ctim; /* time last file status change */
int64_t st_size; /* file size, in bytes */
int64_t st_blocks; /* blocks allocated for file */
uint32_t st_blksize; /* optimal blocksize for I/O */
uint32_t st_flags; /* user defined flags for file */
uint32_t st_gen; /* file generation number */
struct target_freebsd_timespec st_birthtim; /* time of file creation */
/*
* Explicitly pad st_birthtim to 16 bytes so that the size of
* struct stat is backwards compatible. We use bitfields instead
* of an array of chars so that this doesn't require a C99 compiler
* to compile if the size of the padding is 0. We use 2 bitfields
* to cover up to 64 bits on 32-bit machines. We assume that
* CHAR_BIT is 8...
*/
unsigned int:(8 / 2) * (16 - (int)sizeof(struct target_freebsd_timespec));
unsigned int:(8 / 2) * (16 - (int)sizeof(struct target_freebsd_timespec));
} __packed;
/*
* sys/mount.h
*/
/* filesystem id type */
typedef struct target_freebsd_fsid { int32_t val[2]; } target_freebsd_fsid_t;
/* filesystem statistics */
struct target_freebsd11_statfs {
uint32_t f_version; /* structure version number */
uint32_t f_type; /* type of filesystem */
uint64_t f_flags; /* copy of mount exported flags */
uint64_t f_bsize; /* filesystem fragment size */
uint64_t f_iosize; /* optimal transfer block size */
uint64_t f_blocks; /* total data blocks in filesystem */
uint64_t f_bfree; /* free blocks in filesystem */
int64_t f_bavail; /* free blocks avail to non-superuser */
uint64_t f_files; /* total file nodes in filesystem */
int64_t f_ffree; /* free nodes avail to non-superuser */
uint64_t f_syncwrites; /* count of sync writes since mount */
uint64_t f_asyncwrites; /* count of async writes since mount */
uint64_t f_syncreads; /* count of sync reads since mount */
uint64_t f_asyncreads; /* count of async reads since mount */
uint64_t f_spare[10]; /* unused spare */
uint32_t f_namemax; /* maximum filename length */
uint32_t f_owner; /* user that mounted the filesystem */
target_freebsd_fsid_t f_fsid; /* filesystem id */
char f_charspare[80]; /* spare string space */
char f_fstypename[16]; /* filesys type name */
char f_mntfromname[88]; /* mount filesystem */
char f_mntonname[88]; /* dir on which mounted*/
};
struct target_statfs {
uint32_t f_version; /* structure version number */
uint32_t f_type; /* type of filesystem */
uint64_t f_flags; /* copy of mount exported flags */
uint64_t f_bsize; /* filesystem fragment size */
uint64_t f_iosize; /* optimal transfer block size */
uint64_t f_blocks; /* total data blocks in filesystem */
uint64_t f_bfree; /* free blocks in filesystem */
int64_t f_bavail; /* free blocks avail to non-superuser */
uint64_t f_files; /* total file nodes in filesystem */
int64_t f_ffree; /* free nodes avail to non-superuser */
uint64_t f_syncwrites; /* count of sync writes since mount */
uint64_t f_asyncwrites; /* count of async writes since mount */
uint64_t f_syncreads; /* count of sync reads since mount */
uint64_t f_asyncreads; /* count of async reads since mount */
uint64_t f_spare[10]; /* unused spare */
uint32_t f_namemax; /* maximum filename length */
uint32_t f_owner; /* user that mounted the filesystem */
target_freebsd_fsid_t f_fsid; /* filesystem id */
char f_charspare[80]; /* spare string space */
char f_fstypename[16]; /* filesystem type name */
char f_mntfromname[1024]; /* mounted filesystem */
char f_mntonname[1024]; /* directory on which mounted */
};
/* File identifier. These are unique per filesystem on a single machine. */
#define TARGET_MAXFIDSZ 16
struct target_freebsd_fid {
uint16_t fid_len; /* len of data in bytes */
uint16_t fid_data0; /* force longword align */
char fid_data[TARGET_MAXFIDSZ]; /* data (variable len) */
};
/* Generic file handle */
struct target_freebsd_fhandle {
target_freebsd_fsid_t fh_fsid; /* Filesystem id of mount point */
struct target_freebsd_fid fh_fid; /* Filesys specific id */
};
typedef struct target_freebsd_fhandle target_freebsd_fhandle_t;
/*
* sys/fcntl.h
*/
#define TARGET_F_DUPFD 0
#define TARGET_F_GETFD 1
#define TARGET_F_SETFD 2
#define TARGET_F_GETFL 3
#define TARGET_F_SETFL 4
#define TARGET_F_GETOWN 5
#define TARGET_F_SETOWN 6
#define TARGET_F_OGETLK 7
#define TARGET_F_OSETLK 8
#define TARGET_F_OSETLKW 9
#define TARGET_F_DUP2FD 10
#define TARGET_F_GETLK 11
#define TARGET_F_SETLK 12
#define TARGET_F_SETLKW 13
#define TARGET_F_SETLK_REMOTE 14
#define TARGET_F_READAHEAD 15
#define TARGET_F_RDAHEAD 16
#define TARGET_F_DUPFD_CLOEXEC 17
#define TARGET_F_DUP2FD_CLOEXEC 18
/* FreeBSD-specific */
#define TARGET_F_ADD_SEALS 19
#define TARGET_F_GET_SEALS 20
struct target_freebsd_flock {
int64_t l_start;
int64_t l_len;
int32_t l_pid;
int16_t l_type;
int16_t l_whence;
int32_t l_sysid;
} QEMU_PACKED;
#define safe_syscall0(type, name) \
type safe_##name(void) \
{ \
@ -226,8 +437,12 @@ type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
return safe_syscall(SYS_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
#define safe_fcntl(...) safe_syscall(SYS_fcntl, __VA_ARGS__)
/* So far all target and host bitmasks are the same */
#undef target_to_host_bitmask
#define target_to_host_bitmask(x, tbl) (x)
#undef host_to_target_bitmask
#define host_to_target_bitmask(x, tbl) (x)
#endif /* SYSCALL_DEFS_H */

@ -20,7 +20,6 @@
#ifndef TARGET_ARCH_ELF_H
#define TARGET_ARCH_ELF_H
#define ELF_START_MMAP 0x2aaaaab000ULL
#define ELF_ET_DYN_LOAD_ADDR 0x01021000
#define elf_check_arch(x) (((x) == ELF_ARCH))

@ -742,8 +742,12 @@ static void tcp_chr_websock_handshake(QIOTask *task, gpointer user_data)
{
Chardev *chr = user_data;
SocketChardev *s = user_data;
Error *err = NULL;
if (qio_task_propagate_error(task, NULL)) {
if (qio_task_propagate_error(task, &err)) {
error_reportf_err(err,
"websock handshake of character device %s failed: ",
chr->label);
tcp_chr_disconnect(chr);
} else {
if (s->do_telnetopt) {
@ -778,8 +782,12 @@ static void tcp_chr_tls_handshake(QIOTask *task,
{
Chardev *chr = user_data;
SocketChardev *s = user_data;
Error *err = NULL;
if (qio_task_propagate_error(task, NULL)) {
if (qio_task_propagate_error(task, &err)) {
error_reportf_err(err,
"TLS handshake of character device %s failed: ",
chr->label);
tcp_chr_disconnect(chr);
} else {
if (s->is_websock) {

@ -190,7 +190,7 @@ static void qemu_chr_open_stdio(Chardev *chr,
}
}
dwMode |= ENABLE_LINE_INPUT;
dwMode |= ENABLE_LINE_INPUT | ENABLE_VIRTUAL_TERMINAL_INPUT;
if (is_console) {
/* set the terminal in raw mode */

@ -7,6 +7,7 @@
#CONFIG_VFIO_CCW=n
#CONFIG_VIRTIO_PCI=n
#CONFIG_WDT_DIAG288=n
#CONFIG_PCIE_DEVICES=n
# Boards:
#

@ -1,5 +1,5 @@
TARGET_ARCH=loongarch64
TARGET_BASE_ARCH=loongarch
TARGET_SUPPORTS_MTTCG=y
TARGET_XML_FILES= gdb-xml/loongarch-base64.xml gdb-xml/loongarch-fpu.xml
TARGET_XML_FILES= gdb-xml/loongarch-base32.xml gdb-xml/loongarch-base64.xml gdb-xml/loongarch-fpu.xml
TARGET_NEED_FDT=y

configure vendored
@ -451,7 +451,11 @@ elif check_define __s390__ ; then
cpu="s390"
fi
elif check_define __riscv ; then
cpu="riscv"
if check_define _LP64 ; then
cpu="riscv64"
else
cpu="riscv32"
fi
elif check_define __arm__ ; then
cpu="arm"
elif check_define __aarch64__ ; then
@ -465,50 +469,119 @@ else
echo "WARNING: unrecognized host CPU, proceeding with 'uname -m' output '$cpu'"
fi
# Normalise host CPU name and set multilib cflags. The canonicalization
# isn't really necessary, because the architectures that we check for
# should not hit the 'uname -m' case, but better safe than sorry.
# Normalise host CPU name to the values used by Meson cross files and in source
# directories, and set multilib cflags. The canonicalization isn't really
# necessary, because the architectures that we check for should not hit the
# 'uname -m' case, but better safe than sorry in case --cpu= is used.
#
# Note that this case should only have supported host CPUs, not guests.
# Please keep it sorted and synchronized with meson.build's host_arch.
host_arch=
linux_arch=
case "$cpu" in
aarch64)
host_arch=aarch64
linux_arch=arm64
;;
armv*b|armv*l|arm)
cpu="arm" ;;
cpu=arm
host_arch=arm
linux_arch=arm
;;
i386|i486|i586|i686)
cpu="i386"
CPU_CFLAGS="-m32" ;;
host_arch=i386
linux_arch=x86
CPU_CFLAGS="-m32"
;;
loongarch*)
cpu=loongarch64
host_arch=loongarch64
;;
mips64*)
cpu=mips64
host_arch=mips
linux_arch=mips
;;
mips*)
cpu=mips
host_arch=mips
linux_arch=mips
;;
ppc)
host_arch=ppc
linux_arch=powerpc
CPU_CFLAGS="-m32"
;;
ppc64)
host_arch=ppc64
linux_arch=powerpc
CPU_CFLAGS="-m64 -mbig-endian"
;;
ppc64le)
cpu=ppc64
host_arch=ppc64
linux_arch=powerpc
CPU_CFLAGS="-m64 -mlittle-endian"
;;
riscv32 | riscv64)
host_arch=riscv
linux_arch=riscv
;;
s390)
linux_arch=s390
CPU_CFLAGS="-m31"
;;
s390x)
host_arch=s390x
linux_arch=s390
CPU_CFLAGS="-m64"
;;
sparc|sun4[cdmuv])
cpu=sparc
CPU_CFLAGS="-m32 -mv8plus -mcpu=ultrasparc"
;;
sparc64)
host_arch=sparc64
CPU_CFLAGS="-m64 -mcpu=ultrasparc"
;;
x32)
cpu="x86_64"
CPU_CFLAGS="-mx32" ;;
host_arch=x86_64
linux_arch=x86
CPU_CFLAGS="-mx32"
;;
x86_64|amd64)
cpu="x86_64"
host_arch=x86_64
linux_arch=x86
# ??? Only extremely old AMD cpus do not have cmpxchg16b.
# If we truly care, we should simply detect this case at
# runtime and generate the fallback to serial emulation.
CPU_CFLAGS="-m64 -mcx16" ;;
mips*)
cpu="mips" ;;
ppc)
CPU_CFLAGS="-m32" ;;
ppc64)
CPU_CFLAGS="-m64 -mbig-endian" ;;
ppc64le)
cpu="ppc64"
CPU_CFLAGS="-m64 -mlittle-endian" ;;
s390)
CPU_CFLAGS="-m31" ;;
s390x)
CPU_CFLAGS="-m64" ;;
sparc|sun4[cdmuv])
cpu="sparc"
CPU_CFLAGS="-m32 -mv8plus -mcpu=ultrasparc" ;;
sparc64)
CPU_CFLAGS="-m64 -mcpu=ultrasparc" ;;
CPU_CFLAGS="-m64 -mcx16"
;;
esac
if test -n "$host_arch" && {
! test -d "$source_path/linux-user/include/host/$host_arch" ||
! test -d "$source_path/common-user/host/$host_arch"; }; then
error_exit "linux-user/include/host/$host_arch does not exist." \
"This is a bug in the configure script, please report it."
fi
if test -n "$linux_arch" && ! test -d "$source_path/linux-headers/asm-$linux_arch"; then
error_exit "linux-headers/asm-$linux_arch does not exist." \
"This is a bug in the configure script, please report it."
fi
check_py_version() {
# We require python >= 3.7.
# NB: a True python conditional creates a non-zero return code (Failure)
@ -766,12 +839,15 @@ for opt do
# everything else has the same name in configure and meson
--*) meson_option_parse "$opt" "$optarg"
;;
# Pass through -Dxxxx options to meson
-D*) meson_options="$meson_options $opt"
;;
esac
done
if ! test -e "$source_path/.git"
then
git_submodules_action="ignore"
git_submodules_action="validate"
fi
# test for any invalid configuration combinations
@ -809,7 +885,7 @@ default_target_list=""
mak_wilds=""
if [ "$linux_user" != no ]; then
if [ "$targetos" = linux ] && [ -d "$source_path/linux-user/include/host/$cpu" ]; then
if [ "$targetos" = linux ] && [ -n "$host_arch" ]; then
linux_user=yes
elif [ "$linux_user" = yes ]; then
error_exit "linux-user not supported on this architecture"
@ -855,6 +931,7 @@ $(echo Available targets: $default_target_list | \
--target-list-exclude=LIST exclude a set of targets from the default target-list
Advanced options (experts only):
-Dmesonoptname=val passthrough option to meson unmodified
--cross-prefix=PREFIX use PREFIX for compile tools, PREFIX can be blank [$cross_prefix]
--cc=CC use C compiler CC [$cc]
--host-cc=CC use C compiler CC [$host_cc] for code run at
@ -954,19 +1031,14 @@ fi
python="$python -B"
mkvenv="$python ${source_path}/python/scripts/mkvenv.py"
mkvenv_flags=""
if test "$download" = "enabled" ; then
mkvenv_flags="--online"
fi
# Finish preparing the virtual environment using vendored .whl files
if ! $mkvenv ensure \
$mkvenv_flags \
--dir "${source_path}/python/wheels" \
--diagnose "meson" \
"meson>=0.63.0" ;
then
exit 1
if $python -c 'import sys; sys.exit(sys.version_info >= (3,11))'; then
$mkvenv ensure --dir "${source_path}/python/wheels" \
'tomli>=1.2.0' || exit 1
fi
$mkvenv ensuregroup --dir "${source_path}/python/wheels" \
${source_path}/pythondeps.toml meson || exit 1
# At this point, we expect Meson to be installed and available.
# We expect mkvenv or pip to have created pyvenv/bin/meson for us.
@ -983,10 +1055,9 @@ if test "$download" = "enabled" -a "$docs" = "enabled" ; then
fi
if test "$docs" != "disabled" ; then
if ! $mkvenv ensure \
if ! $mkvenv ensuregroup \
$mkvenv_flags \
--diagnose "sphinx-build" \
"sphinx>=1.6.0" "sphinx-rtd-theme>=0.5.0";
${source_path}/pythondeps.toml docs;
then
if test "$docs" = "enabled" ; then
exit 1
@ -1207,6 +1278,7 @@ fi
: ${cross_prefix_sh4="sh4-linux-gnu-"}
: ${cross_prefix_sparc64="sparc64-linux-gnu-"}
: ${cross_prefix_sparc="$cross_prefix_sparc64"}
: ${cross_prefix_tricore="tricore-"}
: ${cross_prefix_x86_64="x86_64-linux-gnu-"}
: ${cross_cc_aarch64_be="$cross_cc_aarch64"}
@ -1293,7 +1365,7 @@ probe_target_compiler() {
sh4) container_hosts=x86_64 ;;
sparc64) container_hosts=x86_64 ;;
tricore) container_hosts=x86_64 ;;
x86_64) container_hosts="aarch64 ppc64el x86_64" ;;
x86_64) container_hosts="aarch64 ppc64le x86_64" ;;
xtensa*) container_hosts=x86_64 ;;
esac
@ -1394,17 +1466,12 @@ probe_target_compiler() {
tricore)
container_image=debian-tricore-cross
container_cross_prefix=tricore-
container_cross_as=tricore-as
container_cross_ld=tricore-ld
container_cross_cc=tricore-gcc
break
;;
x86_64)
container_image=debian-amd64-cross
container_cross_prefix=x86_64-linux-gnu-
;;
xtensa*)
container_hosts=x86_64
container_image=debian-xtensa-cross
# default to the dc232b cpu
@ -1721,43 +1788,14 @@ echo "PKG_CONFIG=${pkg_config}" >> $config_host_mak
echo "CC=$cc" >> $config_host_mak
echo "EXESUF=$EXESUF" >> $config_host_mak
# use included Linux headers
if test "$linux" = "yes" ; then
mkdir -p linux-headers
case "$cpu" in
i386|x86_64)
linux_arch=x86
;;
ppc|ppc64)
linux_arch=powerpc
;;
s390x)
linux_arch=s390
;;
aarch64)
linux_arch=arm64
;;
loongarch*)
linux_arch=loongarch
;;
mips64)
linux_arch=mips
;;
*)
# For most CPUs the kernel architecture name and QEMU CPU name match.
linux_arch="$cpu"
;;
esac
# For non-KVM architectures we will not have asm headers
if [ -e "$source_path/linux-headers/asm-$linux_arch" ]; then
# use included Linux headers for KVM architectures
if test "$linux" = "yes" && test -n "$linux_arch"; then
symlink "$source_path/linux-headers/asm-$linux_arch" linux-headers/asm
fi
fi
for target in $target_list; do
target_dir="$target"
target_name=$(echo $target | cut -d '-' -f 1)$EXESUF
mkdir -p "$target_dir"
case $target in
*-user) symlink "../qemu-$target_name" "$target_dir/qemu-$target_name" ;;
*) symlink "../qemu-system-$target_name" "$target_dir/qemu-system-$target_name" ;;
@ -1916,6 +1954,7 @@ if test "$skip_meson" = no; then
if test "$?" -ne 0 ; then
error_exit "meson setup failed"
fi
echo "$meson" > build.ninja.stamp
else
if test -f meson-private/cmd_line.txt; then
# Adjust old command line options that were removed

@ -316,6 +316,11 @@ static int fill_context(KDDEBUGGER_DATA64 *kdbg,
return 1;
}
if (!Prcb) {
eprintf("Context for CPU #%d is missing\n", i);
continue;
}
if (va_space_rw(vs, Prcb + kdbg->OffsetPrcbContext,
&Context, sizeof(Context), 0)) {
eprintf("Failed to read CPU #%d ContextFrame location\n", i);

@ -772,7 +772,7 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
for (i = 0; i < argc; i++) {
char *opt = argv[i];
g_autofree char **tokens = g_strsplit(opt, "=", 2);
g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
if (g_strcmp0(tokens[0], "iblksize") == 0) {
l1_iblksize = STRTOLL(tokens[1]);

@ -148,7 +148,7 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
int argc, char **argv)
{
for (int i = 0; i < argc; i++) {
g_autofree char **tokens = g_strsplit(argv[i], "=", 2);
g_auto(GStrv) tokens = g_strsplit(argv[i], "=", 2);
if (g_strcmp0(tokens[0], "filename") == 0) {
file_name = g_strdup(tokens[1]);
}

@ -227,7 +227,7 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
for (int i = 0; i < argc; i++) {
char *opt = argv[i];
g_autofree char **tokens = g_strsplit(opt, "=", 2);
g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
if (g_strcmp0(tokens[0], "ifilter") == 0) {
parse_insn_match(tokens[1]);
} else if (g_strcmp0(tokens[0], "afilter") == 0) {
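/*
 * A minimal sketch, assuming GLib: g_strsplit() returns a NULL-terminated
 * string vector, so g_auto(GStrv) — which frees it with g_strfreev(),
 * releasing the elements too — is the right cleanup attribute in the three
 * plugin hunks above, whereas g_autofree would only free the outer array
 * and leak the strings.
 */
#include <glib.h>

int main(void)
{
    g_auto(GStrv) tokens = g_strsplit("key=value", "=", 2);

    g_print("%s -> %s\n", tokens[0], tokens[1]);
    return 0;    /* tokens freed here via g_strfreev() */
}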
