merge upstream
commit 3edab00190
1 .gitattributes (vendored)
@@ -1,3 +1,4 @@
*.c.inc diff=c
*.h.inc diff=c
*.m diff=objc
*.py diff=python

@@ -515,8 +515,6 @@ build-oss-fuzz:
echo Testing ${fuzzer} ... ;
"${fuzzer}" -runs=1 -seed=1 || exit 1 ;
done
# Unrelated to fuzzer: run some tests with -fsanitize=address
- cd build-oss-fuzz && make check-qtest-i386 check-unit

build-tci:
extends: .native_build_job_template

@@ -11,6 +11,6 @@ MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='pkg'
PIP3='/usr/local/bin/pip-3.8'
PKGS='alsa-lib bash bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage ctags curl cyrus-sasl dbus diffutils dtc fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 libepoxy libffi libgcrypt libjpeg-turbo libnfs libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv p5-Test-Harness perl5 pixman pkgconf png py38-numpy py38-pillow py38-pip py38-sphinx py38-sphinx_rtd_theme py38-virtualenv py38-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd'
PKGS='alsa-lib bash bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage ctags curl cyrus-sasl dbus diffutils dtc fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 libepoxy libffi libgcrypt libjpeg-turbo libnfs libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py38-numpy py38-pillow py38-pip py38-sphinx py38-sphinx_rtd_theme py38-virtualenv py38-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd'
PYPI_PKGS=''
PYTHON='/usr/local/bin/python3'

@@ -11,6 +11,6 @@ MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='pkg'
PIP3='/usr/local/bin/pip-3.8'
PKGS='alsa-lib bash bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage ctags curl cyrus-sasl dbus diffutils dtc fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 libepoxy libffi libgcrypt libjpeg-turbo libnfs libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv p5-Test-Harness perl5 pixman pkgconf png py38-numpy py38-pillow py38-pip py38-sphinx py38-sphinx_rtd_theme py38-virtualenv py38-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd'
PKGS='alsa-lib bash bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage ctags curl cyrus-sasl dbus diffutils dtc fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 libepoxy libffi libgcrypt libjpeg-turbo libnfs libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py38-numpy py38-pillow py38-pip py38-sphinx py38-sphinx_rtd_theme py38-virtualenv py38-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd'
PYPI_PKGS=''
PYTHON='/usr/local/bin/python3'

@@ -5,12 +5,12 @@
# https://gitlab.com/libvirt/libvirt-ci

CCACHE='/usr/local/bin/ccache'
CPAN_PKGS='Test::Harness'
CPAN_PKGS=''
CROSS_PKGS=''
MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='brew'
PIP3='/usr/local/bin/pip3'
PKGS='bash bc bzip2 capstone ccache cpanminus ctags curl dbus diffutils dtc gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson ncurses nettle ninja perl pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy sparse spice-protocol tesseract texinfo usbredir vde vte3 zlib zstd'
PKGS='bash bc bzip2 capstone ccache ctags curl dbus diffutils dtc gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson ncurses nettle ninja perl pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy sparse spice-protocol tesseract texinfo usbredir vde vte3 zlib zstd'
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme virtualenv'
PYTHON='/usr/local/bin/python3'

@@ -21,18 +21,10 @@ amd64-debian-user-cross-container:

arm64-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
stage: containers
variables:
NAME: debian-arm64-cross

arm64-test-debian-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian11-container']
variables:
NAME: debian-arm64-test-cross

armel-debian-cross-container:
extends: .container_job_template
stage: containers-layer2

@@ -139,10 +131,16 @@ riscv64-debian-cross-container:
variables:
NAME: debian-riscv64-cross

s390x-debian-cross-container:
# we can however build TCG tests using a non-sid base
riscv64-debian-test-cross-container:
extends: .container_job_template
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-riscv64-test-cross

s390x-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-s390x-cross

@@ -14,6 +14,7 @@ variables:
GIT_STRATEGY: clone

include:
- local: '/.gitlab-ci.d/custom-runners/ubuntu-18.04-s390x.yml'
- local: '/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml'
- local: '/.gitlab-ci.d/custom-runners/ubuntu-20.04-aarch64.yml'
- local: '/.gitlab-ci.d/custom-runners/ubuntu-20.04-aarch32.yml'
- local: '/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml'

23 .gitlab-ci.d/custom-runners/ubuntu-20.04-aarch32.yml (new file)
@@ -0,0 +1,23 @@
# All ubuntu-20.04 jobs should run successfully in an environment
# setup by the scripts/ci/setup/qemu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 18.04/20.04"

ubuntu-20.04-aarch32-all:
needs: []
stage: build
tags:
- ubuntu_20.04
- aarch32
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$AARCH32_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
- ../configure --cross-prefix=arm-linux-gnueabihf-
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1

@@ -1,13 +1,15 @@
# All ubuntu-18.04 jobs should run successfully in an environment
# All ubuntu-20.04 jobs should run successfully in an environment
# setup by the scripts/ci/setup/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 18.04/20.04"
# "Install basic packages to build QEMU on Ubuntu 20.04/20.04"

ubuntu-18.04-s390x-all-linux-static:
ubuntu-20.04-s390x-all-linux-static:
needs: []
stage: build
tags:
- ubuntu_18.04
- ubuntu_20.04
- s390x
variables:
DFLTCC: 0
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$S390X_RUNNER_AVAILABLE"

@@ -21,12 +23,14 @@ ubuntu-18.04-s390x-all-linux-static:
- make --output-sync -j`nproc` check V=1
- make --output-sync -j`nproc` check-tcg V=1

ubuntu-18.04-s390x-all:
ubuntu-20.04-s390x-all:
needs: []
stage: build
tags:
- ubuntu_18.04
- ubuntu_20.04
- s390x
variables:
DFLTCC: 0
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$S390X_RUNNER_AVAILABLE"

@@ -37,12 +41,14 @@ ubuntu-18.04-s390x-all:
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1

ubuntu-18.04-s390x-alldbg:
ubuntu-20.04-s390x-alldbg:
needs: []
stage: build
tags:
- ubuntu_18.04
- ubuntu_20.04
- s390x
variables:
DFLTCC: 0
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual

@@ -58,12 +64,14 @@ ubuntu-18.04-s390x-alldbg:
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1

ubuntu-18.04-s390x-clang:
ubuntu-20.04-s390x-clang:
needs: []
stage: build
tags:
- ubuntu_18.04
- ubuntu_20.04
- s390x
variables:
DFLTCC: 0
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual

@@ -78,12 +86,14 @@ ubuntu-18.04-s390x-clang:
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1

ubuntu-18.04-s390x-tci:
ubuntu-20.04-s390x-tci:
needs: []
stage: build
tags:
- ubuntu_18.04
- ubuntu_20.04
- s390x
variables:
DFLTCC: 0
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual

@@ -97,12 +107,14 @@ ubuntu-18.04-s390x-tci:
- ../configure --disable-libssh --enable-tcg-interpreter
- make --output-sync -j`nproc`

ubuntu-18.04-s390x-notcg:
ubuntu-20.04-s390x-notcg:
needs: []
stage: build
tags:
- ubuntu_18.04
- ubuntu_20.04
- s390x
variables:
DFLTCC: 0
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual

@@ -1,7 +1,7 @@
#
# Docker image to cross-compile EDK2 firmware binaries
#
FROM ubuntu:16.04
FROM ubuntu:18.04

MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>

@@ -20,7 +20,7 @@ RUN apt update \
iasl \
make \
nasm \
python \
python3 \
uuid-dev \
&& \
\

9 .mailmap
@@ -28,7 +28,11 @@ Thiemo Seufer <ths@networkno.de> ths <ths@c046a42c-6fe2-441c-8c8c-71466251a162>
malc <av1474@comtv.ru> malc <malc@c046a42c-6fe2-441c-8c8c-71466251a162>

# Corrupted Author fields
Aaron Larson <alarson@ddci.com> alarson@ddci.com
Andreas Färber <andreas.faerber@web.de> Andreas Färber <andreas.faerber>
Jason Wang <jasowang@redhat.com> Jason Wang <jasowang>
Marek Dolata <mkdolata@us.ibm.com> mkdolata@us.ibm.com <mkdolata@us.ibm.com>
Michael Ellerman <mpe@ellerman.id.au> michael@ozlabs.org <michael@ozlabs.org>
Nick Hudson <hnick@vmware.com> hnick@vmware.com <hnick@vmware.com>

# There is also a:

@@ -52,7 +56,8 @@ Alexander Graf <agraf@csgraf.de> <agraf@suse.de>
Anthony Liguori <anthony@codemonkey.ws> Anthony Liguori <aliguori@us.ibm.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
Filip Bozuta <filip.bozuta@syrmia.com> <filip.bozuta@rt-rk.com.com>
Frederic Konrad <konrad@adacore.com> <fred.konrad@greensocs.com>
Frederic Konrad <konrad.frederic@yahoo.fr> <fred.konrad@greensocs.com>
Frederic Konrad <konrad.frederic@yahoo.fr> <konrad@adacore.com>
Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com>
Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>

@@ -70,6 +75,7 @@ Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com>
# Also list preferred name forms where people have changed their
# git author config, or had utf8/latin1 encoding issues.
Aaron Lindsay <aaron@os.amperecomputing.com>
Aaron Larson <alarson@ddci.com>
Alexey Gerasimenko <x1917x@gmail.com>
Alex Chen <alex.chen@huawei.com>
Alex Ivanov <void@aleksoft.net>

@@ -144,6 +150,7 @@ Pan Nengyuan <pannengyuan@huawei.com>
Pavel Dovgaluk <dovgaluk@ispras.ru>
Pavel Dovgaluk <pavel.dovgaluk@gmail.com>
Pavel Dovgaluk <Pavel.Dovgaluk@ispras.ru>
Peter Chubb <peter.chubb@nicta.com.au>
Peter Crosthwaite <crosthwaite.peter@gmail.com>
Peter Crosthwaite <peter.crosthwaite@petalogix.com>
Peter Crosthwaite <peter.crosthwaite@xilinx.com>

18 .travis.yml
@@ -1,6 +1,3 @@
# The current Travis default is a VM based 16.04 Xenial on GCE
# Additional builds with specific requirements for a full VM need to
# be added as additional matrix: entries later on
os: linux
dist: focal
language: c

@@ -190,7 +187,7 @@ jobs:

- name: "[s390x] GCC check-tcg"
arch: s390x
dist: bionic
dist: focal
addons:
apt_packages:
- libaio-dev

@@ -221,6 +218,7 @@ jobs:
- TEST_CMD="make check check-tcg V=1"
- CONFIG="--disable-containers --target-list=${MAIN_SOFTMMU_TARGETS},s390x-linux-user"
- UNRELIABLE=true
- DFLTCC=0
script:
- BUILD_RC=0 && make -j${JOBS} || BUILD_RC=$?
- |

@@ -233,7 +231,7 @@ jobs:

- name: "[s390x] GCC (other-softmmu)"
arch: s390x
dist: bionic
dist: focal
addons:
apt_packages:
- libaio-dev

@@ -260,21 +258,22 @@ jobs:
env:
- CONFIG="--disable-containers --audio-drv-list=sdl --disable-user
--target-list-exclude=${MAIN_SOFTMMU_TARGETS}"

- DFLTCC=0
- name: "[s390x] GCC (user)"
arch: s390x
dist: bionic
dist: focal
addons:
apt_packages:
- libgcrypt20-dev
- libglib2.0-dev
- libgnutls28-dev
- ninja-build
env:
- CONFIG="--disable-containers --disable-system"

- DFLTCC=0
- name: "[s390x] Clang (disable-tcg)"
arch: s390x
dist: bionic
dist: focal
compiler: clang
addons:
apt_packages:

@@ -305,3 +304,4 @@ jobs:
- CONFIG="--disable-containers --disable-tcg --enable-kvm
--disable-tools --host-cc=clang --cxx=clang++"
- UNRELIABLE=true
- DFLTCC=0

58 MAINTAINERS
@ -305,7 +305,6 @@ S: Maintained
|
||||
F: target/s390x/
|
||||
F: target/s390x/tcg
|
||||
F: hw/s390x/
|
||||
F: disas/s390.c
|
||||
F: tests/tcg/s390x/
|
||||
L: qemu-s390x@nongnu.org
|
||||
|
||||
@ -548,6 +547,12 @@ F: include/*/*win32*
|
||||
X: qga/*win32*
|
||||
F: qemu.nsi
|
||||
|
||||
Darwin (macOS, iOS)
|
||||
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
|
||||
S: Odd Fixes
|
||||
F: .gitlab-ci.d/cirrus/macos-*
|
||||
F: */*.m
|
||||
|
||||
Alpha Machines
|
||||
--------------
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
@ -642,7 +647,7 @@ M: Peter Maydell <peter.maydell@linaro.org>
|
||||
L: qemu-arm@nongnu.org
|
||||
S: Odd Fixes
|
||||
F: hw/*/exynos*
|
||||
F: include/hw/arm/exynos4210.h
|
||||
F: include/hw/*/exynos*
|
||||
|
||||
Calxeda Highbank
|
||||
M: Rob Herring <robh@kernel.org>
|
||||
@ -936,6 +941,7 @@ S: Maintained
|
||||
F: hw/arm/virt*
|
||||
F: include/hw/arm/virt.h
|
||||
F: docs/system/arm/virt.rst
|
||||
F: tests/avocado/machine_aarch64_virt.py
|
||||
|
||||
Xilinx Zynq
|
||||
M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
|
||||
@ -1527,7 +1533,7 @@ F: include/hw/rtc/sun4v-rtc.h
|
||||
|
||||
Leon3
|
||||
M: Fabien Chouteau <chouteau@adacore.com>
|
||||
M: KONRAD Frederic <frederic.konrad@adacore.com>
|
||||
M: Frederic Konrad <konrad.frederic@yahoo.fr>
|
||||
S: Maintained
|
||||
F: hw/sparc/leon3.c
|
||||
F: hw/*/grlib*
|
||||
@ -1819,7 +1825,6 @@ F: docs/specs/acpi_hw_reduced_hotplug.rst
|
||||
|
||||
ACPI/VIOT
|
||||
M: Jean-Philippe Brucker <jean-philippe@linaro.org>
|
||||
R: Ani Sinha <ani@anisinha.ca>
|
||||
S: Supported
|
||||
F: hw/acpi/viot.c
|
||||
F: hw/acpi/viot.h
|
||||
@ -2415,6 +2420,7 @@ F: audio/alsaaudio.c
|
||||
|
||||
Core Audio framework backend
|
||||
M: Gerd Hoffmann <kraxel@redhat.com>
|
||||
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
|
||||
R: Christian Schoenebeck <qemu_oss@crudebyte.com>
|
||||
R: Akihiko Odaki <akihiko.odaki@gmail.com>
|
||||
S: Odd Fixes
|
||||
@ -2501,7 +2507,7 @@ F: scsi/*
|
||||
|
||||
Block Jobs
|
||||
M: John Snow <jsnow@redhat.com>
|
||||
M: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
|
||||
M: Vladimir Sementsov-Ogievskiy <v.sementsov-og@mail.ru>
|
||||
L: qemu-block@nongnu.org
|
||||
S: Supported
|
||||
F: blockjob.c
|
||||
@ -2515,9 +2521,12 @@ F: block/stream.c
|
||||
F: block/mirror.c
|
||||
F: qapi/job.json
|
||||
F: block/block-copy.c
|
||||
F: include/block/block-copy.c
|
||||
F: include/block/block-copy.h
|
||||
F: block/reqlist.c
|
||||
F: include/block/reqlist.h
|
||||
F: block/copy-before-write.h
|
||||
F: block/copy-before-write.c
|
||||
F: block/snapshot-access.c
|
||||
F: include/block/aio_task.h
|
||||
F: block/aio_task.c
|
||||
F: util/qemu-co-shared-resource.c
|
||||
@ -2537,7 +2546,7 @@ T: git https://repo.or.cz/qemu/armbru.git block-next
|
||||
|
||||
Dirty Bitmaps
|
||||
M: Eric Blake <eblake@redhat.com>
|
||||
M: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
|
||||
M: Vladimir Sementsov-Ogievskiy <v.sementsov-og@mail.ru>
|
||||
R: John Snow <jsnow@redhat.com>
|
||||
L: qemu-block@nongnu.org
|
||||
S: Supported
|
||||
@ -2669,6 +2678,7 @@ F: util/drm.c
|
||||
|
||||
Cocoa graphics
|
||||
M: Peter Maydell <peter.maydell@linaro.org>
|
||||
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
|
||||
R: Akihiko Odaki <akihiko.odaki@gmail.com>
|
||||
S: Odd Fixes
|
||||
F: ui/cocoa.m
|
||||
@ -2747,26 +2757,26 @@ F: backends/cryptodev*.c
|
||||
Python library
|
||||
M: John Snow <jsnow@redhat.com>
|
||||
M: Cleber Rosa <crosa@redhat.com>
|
||||
R: Eduardo Habkost <eduardo@habkost.net>
|
||||
R: Beraldo Leal <bleal@redhat.com>
|
||||
S: Maintained
|
||||
F: python/
|
||||
T: git https://gitlab.com/jsnow/qemu.git python
|
||||
|
||||
Python scripts
|
||||
M: Eduardo Habkost <eduardo@habkost.net>
|
||||
M: John Snow <jsnow@redhat.com>
|
||||
M: Cleber Rosa <crosa@redhat.com>
|
||||
S: Odd Fixes
|
||||
F: scripts/*.py
|
||||
F: tests/*.py
|
||||
|
||||
Benchmark util
|
||||
M: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
|
||||
M: Vladimir Sementsov-Ogievskiy <v.sementsov-og@mail.ru>
|
||||
S: Maintained
|
||||
F: scripts/simplebench/
|
||||
T: git https://src.openvz.org/scm/~vsementsov/qemu.git simplebench
|
||||
|
||||
Transactions helper
|
||||
M: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
|
||||
M: Vladimir Sementsov-Ogievskiy <v.sementsov-og@mail.ru>
|
||||
S: Maintained
|
||||
F: include/qemu/transactions.h
|
||||
F: util/transactions.c
|
||||
@ -3135,6 +3145,19 @@ F: include/hw/i2c/smbus_master.h
|
||||
F: include/hw/i2c/smbus_slave.h
|
||||
F: include/hw/i2c/smbus_eeprom.h
|
||||
|
||||
PMBus
|
||||
M: Titus Rwantare <titusr@google.com>
|
||||
S: Maintained
|
||||
F: hw/i2c/pmbus_device.c
|
||||
F: hw/sensor/adm1272.c
|
||||
F: hw/sensor/isl_pmbus_vr.c
|
||||
F: hw/sensor/max34451.c
|
||||
F: include/hw/i2c/pmbus_device.h
|
||||
F: include/hw/sensor/isl_pmbus_vr.h
|
||||
F: tests/qtest/adm1272-test.c
|
||||
F: tests/qtest/max34451-test.c
|
||||
F: tests/qtest/isl_pmbus_vr-test.c
|
||||
|
||||
Firmware schema specifications
|
||||
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
|
||||
R: Daniel P. Berrange <berrange@redhat.com>
|
||||
@ -3144,7 +3167,7 @@ F: docs/interop/firmware.json
|
||||
|
||||
EDK2 Firmware
|
||||
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
|
||||
R: Gerd Hoffmann <kraxel@redhat.com>
|
||||
M: Gerd Hoffmann <kraxel@redhat.com>
|
||||
S: Supported
|
||||
F: hw/i386/*ovmf*
|
||||
F: pc-bios/descriptors/??-edk2-*.json
|
||||
@ -3284,7 +3307,6 @@ S390 TCG target
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
S: Maintained
|
||||
F: tcg/s390/
|
||||
F: disas/s390.c
|
||||
L: qemu-s390x@nongnu.org
|
||||
|
||||
SPARC TCG target
|
||||
@ -3337,7 +3359,7 @@ F: block/iscsi-opts.c
|
||||
|
||||
Network Block Device (NBD)
|
||||
M: Eric Blake <eblake@redhat.com>
|
||||
M: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
|
||||
M: Vladimir Sementsov-Ogievskiy <v.sementsov-og@mail.ru>
|
||||
L: qemu-block@nongnu.org
|
||||
S: Maintained
|
||||
F: block/nbd*
|
||||
@ -3433,7 +3455,7 @@ F: block/dmg.c
|
||||
parallels
|
||||
M: Stefan Hajnoczi <stefanha@redhat.com>
|
||||
M: Denis V. Lunev <den@openvz.org>
|
||||
M: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
|
||||
M: Vladimir Sementsov-Ogievskiy <v.sementsov-og@mail.ru>
|
||||
L: qemu-block@nongnu.org
|
||||
S: Supported
|
||||
F: block/parallels.c
|
||||
@ -3549,6 +3571,7 @@ S: Maintained
|
||||
F: semihosting/
|
||||
F: include/semihosting/
|
||||
F: tests/tcg/multiarch/arm-compat-semi/
|
||||
F: tests/tcg/aarch64/system/semiheap.c
|
||||
|
||||
Multi-process QEMU
|
||||
M: Elena Ufimtseva <elena.ufimtseva@oracle.com>
|
||||
@ -3591,7 +3614,7 @@ M: Thomas Huth <thuth@redhat.com>
|
||||
R: Wainer dos Santos Moschetta <wainersm@redhat.com>
|
||||
R: Beraldo Leal <bleal@redhat.com>
|
||||
S: Maintained
|
||||
F: .github/lockdown.yml
|
||||
F: .github/workflows/lockdown.yml
|
||||
F: .gitlab-ci.yml
|
||||
F: .gitlab-ci.d/
|
||||
F: .travis.yml
|
||||
@ -3607,7 +3630,8 @@ FreeBSD Hosted Continuous Integration
|
||||
M: Ed Maste <emaste@freebsd.org>
|
||||
M: Li-Wen Hsu <lwhsu@freebsd.org>
|
||||
S: Maintained
|
||||
F: .cirrus.yml
|
||||
F: .gitlab-ci.d/cirrus/freebsd*
|
||||
F: tests/vm/freebsd
|
||||
W: https://cirrus-ci.com/github/qemu/qemu
|
||||
|
||||
Windows Hosted Continuous Integration
|
||||
|
@ -122,7 +122,7 @@ static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
|
||||
MemoryRegion *area = section->mr;
|
||||
bool writeable = !area->readonly && !area->rom_device;
|
||||
hv_memory_flags_t flags;
|
||||
uint64_t page_size = qemu_real_host_page_size;
|
||||
uint64_t page_size = qemu_real_host_page_size();
|
||||
|
||||
if (!memory_region_is_ram(area)) {
|
||||
if (writeable) {
|
||||
|
@ -9,7 +9,6 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "sysemu/hvf.h"
|
||||
#include "sysemu/hvf_int.h"
|
||||
|
@ -74,11 +74,23 @@ static void kvm_start_vcpu_thread(CPUState *cpu)
|
||||
cpu, QEMU_THREAD_JOINABLE);
|
||||
}
|
||||
|
||||
static bool kvm_vcpu_thread_is_idle(CPUState *cpu)
|
||||
{
|
||||
return !kvm_halt_in_kernel();
|
||||
}
|
||||
|
||||
static bool kvm_cpus_are_resettable(void)
|
||||
{
|
||||
return !kvm_enabled() || kvm_cpu_check_are_resettable();
|
||||
}
|
||||
|
||||
static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
|
||||
|
||||
ops->create_vcpu_thread = kvm_start_vcpu_thread;
|
||||
ops->cpu_thread_is_idle = kvm_vcpu_thread_is_idle;
|
||||
ops->cpus_are_resettable = kvm_cpus_are_resettable;
|
||||
ops->synchronize_post_reset = kvm_cpu_synchronize_post_reset;
|
||||
ops->synchronize_post_init = kvm_cpu_synchronize_post_init;
|
||||
ops->synchronize_state = kvm_cpu_synchronize_state;
|
||||
|
@ -59,7 +59,7 @@
|
||||
#ifdef PAGE_SIZE
|
||||
#undef PAGE_SIZE
|
||||
#endif
|
||||
#define PAGE_SIZE qemu_real_host_page_size
|
||||
#define PAGE_SIZE qemu_real_host_page_size()
|
||||
|
||||
#ifndef KVM_GUESTDBG_BLOCKIRQ
|
||||
#define KVM_GUESTDBG_BLOCKIRQ 0
|
||||
@ -324,14 +324,14 @@ static hwaddr kvm_align_section(MemoryRegionSection *section,
|
||||
with sub-page size and unaligned start address. Pad the start
|
||||
address to next and truncate size to previous page boundary. */
|
||||
aligned = ROUND_UP(section->offset_within_address_space,
|
||||
qemu_real_host_page_size);
|
||||
qemu_real_host_page_size());
|
||||
delta = aligned - section->offset_within_address_space;
|
||||
*start = aligned;
|
||||
if (delta > size) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return (size - delta) & qemu_real_host_page_mask;
|
||||
return (size - delta) & qemu_real_host_page_mask();
|
||||
}
|
||||
|
||||
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
|
||||
@ -626,7 +626,7 @@ static void kvm_log_stop(MemoryListener *listener,
|
||||
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
|
||||
{
|
||||
ram_addr_t start = slot->ram_start_offset;
|
||||
ram_addr_t pages = slot->memory_size / qemu_real_host_page_size;
|
||||
ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();
|
||||
|
||||
cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
|
||||
}
|
||||
@ -662,7 +662,7 @@ static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
|
||||
* And mem->memory_size is aligned to it (otherwise this mem can't
|
||||
* be registered to KVM).
|
||||
*/
|
||||
hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size,
|
||||
hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
|
||||
/*HOST_LONG_BITS*/ 64) / 8;
|
||||
mem->dirty_bmap = g_malloc0(bitmap_size);
|
||||
mem->dirty_bmap_size = bitmap_size;
|
||||
@ -707,7 +707,7 @@ static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
|
||||
mem = &kml->slots[slot_id];
|
||||
|
||||
if (!mem->memory_size || offset >=
|
||||
(mem->memory_size / qemu_real_host_page_size)) {
|
||||
(mem->memory_size / qemu_real_host_page_size())) {
|
||||
return;
|
||||
}
|
||||
|
||||
@ -895,7 +895,7 @@ static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
|
||||
|
||||
/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
|
||||
#define KVM_CLEAR_LOG_SHIFT 6
|
||||
#define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
|
||||
#define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
|
||||
#define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN)
|
||||
|
||||
static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
|
||||
@ -904,7 +904,7 @@ static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
|
||||
KVMState *s = kvm_state;
|
||||
uint64_t end, bmap_start, start_delta, bmap_npages;
|
||||
struct kvm_clear_dirty_log d;
|
||||
unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
|
||||
unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
|
||||
int ret;
|
||||
|
||||
/*
|
||||
@ -1202,8 +1202,8 @@ void kvm_hwpoison_page_add(ram_addr_t ram_addr)
|
||||
|
||||
static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
|
||||
{
|
||||
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
|
||||
/* The kernel expects ioeventfd values in HOST_WORDS_BIGENDIAN
|
||||
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
|
||||
/* The kernel expects ioeventfd values in HOST_BIG_ENDIAN
|
||||
* endianness, but the memory core hands them in target endianness.
|
||||
* For example, PPC is always treated as big-endian even if running
|
||||
* on KVM and on PPC64LE. Correct here.
|
||||
@ -1335,7 +1335,7 @@ kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
|
||||
void kvm_set_max_memslot_size(hwaddr max_slot_size)
|
||||
{
|
||||
g_assert(
|
||||
ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size
|
||||
ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
|
||||
);
|
||||
kvm_max_slot_size = max_slot_size;
|
||||
}
|
||||
@ -1646,7 +1646,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
|
||||
{
|
||||
int i;
|
||||
|
||||
kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
|
||||
kml->slots = g_new0(KVMSlot, s->nr_slots);
|
||||
kml->as_id = as_id;
|
||||
|
||||
for (i = 0; i < s->nr_slots; i++) {
|
||||
@ -1941,7 +1941,7 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
|
||||
return virq;
|
||||
}
|
||||
|
||||
route = g_malloc0(sizeof(KVMMSIRoute));
|
||||
route = g_new0(KVMMSIRoute, 1);
|
||||
route->kroute.gsi = virq;
|
||||
route->kroute.type = KVM_IRQ_ROUTING_MSI;
|
||||
route->kroute.flags = 0;
|
||||
@ -1961,10 +1961,11 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
|
||||
return kvm_set_irq(s, route->kroute.gsi, 1);
|
||||
}
|
||||
|
||||
int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
|
||||
int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
|
||||
{
|
||||
struct kvm_irq_routing_entry kroute = {};
|
||||
int virq;
|
||||
KVMState *s = c->s;
|
||||
MSIMessage msg = {0, 0};
|
||||
|
||||
if (pci_available && dev) {
|
||||
@ -2004,7 +2005,7 @@ int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
|
||||
|
||||
kvm_add_routing_entry(s, &kroute);
|
||||
kvm_arch_add_msi_route_post(&kroute, vector, dev);
|
||||
kvm_irqchip_commit_routes(s);
|
||||
c->changes++;
|
||||
|
||||
return virq;
|
||||
}
|
||||
@ -2162,7 +2163,7 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
|
||||
abort();
|
||||
}
|
||||
|
||||
int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
|
||||
int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
@ -2340,7 +2341,7 @@ static int kvm_init(MachineState *ms)
|
||||
* even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
|
||||
* page size for the system though.
|
||||
*/
|
||||
assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size);
|
||||
assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());
|
||||
|
||||
s->sigmask_len = 8;
|
||||
|
||||
@ -3243,7 +3244,7 @@ int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
|
||||
return 0;
|
||||
}
|
||||
|
||||
bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
|
||||
bp = g_new(struct kvm_sw_breakpoint, 1);
|
||||
bp->pc = addr;
|
||||
bp->use_count = 1;
|
||||
err = kvm_arch_insert_sw_breakpoint(cpu, bp);
|
||||
|
@ -2,12 +2,14 @@ specific_ss.add(files('accel-common.c'))
|
||||
softmmu_ss.add(files('accel-softmmu.c'))
|
||||
user_ss.add(files('accel-user.c'))
|
||||
|
||||
subdir('hvf')
|
||||
subdir('qtest')
|
||||
subdir('kvm')
|
||||
subdir('tcg')
|
||||
subdir('xen')
|
||||
subdir('stubs')
|
||||
if have_system
|
||||
subdir('hvf')
|
||||
subdir('qtest')
|
||||
subdir('kvm')
|
||||
subdir('xen')
|
||||
subdir('stubs')
|
||||
endif
|
||||
|
||||
dummy_ss = ss.source_set()
|
||||
dummy_ss.add(files(
|
||||
|
@ -20,7 +20,6 @@
|
||||
#include "qemu/accel.h"
|
||||
#include "sysemu/qtest.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "sysemu/cpu-timers.h"
|
||||
#include "qemu/guest-random.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "hw/core/cpu.h"
|
||||
|
@ -16,6 +16,8 @@
|
||||
#include "qemu/osdep.h"
|
||||
#include "sysemu/hax.h"
|
||||
|
||||
bool hax_allowed;
|
||||
|
||||
int hax_sync_vcpus(void)
|
||||
{
|
||||
return 0;
|
||||
|
@ -12,10 +12,7 @@
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "sysemu/kvm.h"
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
#include "hw/pci/msi.h"
|
||||
#endif
|
||||
|
||||
KVMState *kvm_state;
|
||||
bool kvm_kernel_irqchip;
|
||||
@ -80,8 +77,7 @@ int kvm_on_sigbus(int code, void *addr)
|
||||
return 1;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
|
||||
int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
@ -152,4 +148,3 @@ bool kvm_dirty_ring_enabled(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
@ -1,4 +1,7 @@
|
||||
specific_ss.add(when: 'CONFIG_HAX', if_false: files('hax-stub.c'))
|
||||
specific_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
|
||||
specific_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
|
||||
specific_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))
|
||||
sysemu_stubs_ss = ss.source_set()
|
||||
sysemu_stubs_ss.add(when: 'CONFIG_HAX', if_false: files('hax-stub.c'))
|
||||
sysemu_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
|
||||
sysemu_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
|
||||
sysemu_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))
|
||||
|
||||
specific_ss.add_all(when: ['CONFIG_SOFTMMU'], if_true: sysemu_stubs_ss)
|
||||
|
@ -28,12 +28,12 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu)
|
||||
G_NORETURN void cpu_loop_exit(CPUState *cpu)
|
||||
{
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
|
||||
G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
|
||||
{
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
@ -63,7 +63,7 @@
|
||||
the ATOMIC_NAME macro, and redefined below. */
|
||||
#if DATA_SIZE == 1
|
||||
# define END
|
||||
#elif defined(HOST_WORDS_BIGENDIAN)
|
||||
#elif HOST_BIG_ENDIAN
|
||||
# define END _be
|
||||
#else
|
||||
# define END _le
|
||||
@ -196,7 +196,7 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
|
||||
|
||||
/* Define reverse-host-endian atomic operations. Note that END is used
|
||||
within the ATOMIC_NAME macro. */
|
||||
#ifdef HOST_WORDS_BIGENDIAN
|
||||
#if HOST_BIG_ENDIAN
|
||||
# define END _le
|
||||
#else
|
||||
# define END _be
|
||||
|
@ -18,7 +18,6 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "qemu/qemu-print.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qapi/qapi-commands-machine.h"
|
||||
@ -215,7 +214,8 @@ static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
|
||||
|
||||
#if defined(DEBUG_DISAS)
|
||||
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
|
||||
FILE *logfile = qemu_log_lock();
|
||||
FILE *logfile = qemu_log_trylock();
|
||||
if (logfile) {
|
||||
int flags = 0;
|
||||
|
||||
if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
|
||||
@ -224,9 +224,10 @@ static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
|
||||
#if defined(TARGET_I386)
|
||||
flags |= CPU_DUMP_CCOP;
|
||||
#endif
|
||||
log_cpu_state(cpu, flags);
|
||||
cpu_dump_state(cpu, logfile, flags);
|
||||
qemu_log_unlock(logfile);
|
||||
}
|
||||
}
|
||||
#endif /* DEBUG_DISAS */
|
||||
}
|
||||
}
|
||||
@ -422,7 +423,7 @@ static void cpu_exec_exit(CPUState *cpu)
|
||||
|
||||
void cpu_exec_step_atomic(CPUState *cpu)
|
||||
{
|
||||
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
TranslationBlock *tb;
|
||||
target_ulong cs_base, pc;
|
||||
uint32_t flags, cflags;
|
||||
@ -532,7 +533,7 @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
|
||||
struct tb_desc desc;
|
||||
uint32_t h;
|
||||
|
||||
desc.env = (CPUArchState *)cpu->env_ptr;
|
||||
desc.env = cpu->env_ptr;
|
||||
desc.cs_base = cs_base;
|
||||
desc.flags = flags;
|
||||
desc.cflags = cflags;
|
||||
@ -811,8 +812,12 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
* raised when single-stepping so that GDB doesn't miss the
|
||||
* next instruction.
|
||||
*/
|
||||
cpu->exception_index =
|
||||
(cpu->singlestep_enabled ? EXCP_DEBUG : -1);
|
||||
if (unlikely(cpu->singlestep_enabled)) {
|
||||
cpu->exception_index = EXCP_DEBUG;
|
||||
qemu_mutex_unlock_iothread();
|
||||
return true;
|
||||
}
|
||||
cpu->exception_index = -1;
|
||||
*last_tb = NULL;
|
||||
}
|
||||
/* The target hook may have updated the 'cpu->interrupt_request';
|
||||
|
@ -1761,7 +1761,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
|
||||
MemOpIdx oi, int size, int prot,
|
||||
uintptr_t retaddr)
|
||||
{
|
||||
size_t mmu_idx = get_mmuidx(oi);
|
||||
uintptr_t mmu_idx = get_mmuidx(oi);
|
||||
MemOp mop = get_memop(oi);
|
||||
int a_bits = get_alignment_bits(mop);
|
||||
uintptr_t index;
|
||||
@ -1769,6 +1769,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
|
||||
target_ulong tlb_addr;
|
||||
void *hostaddr;
|
||||
|
||||
tcg_debug_assert(mmu_idx < NB_MMU_MODES);
|
||||
|
||||
/* Adjust the given return address. */
|
||||
retaddr -= GETPC_ADJ;
|
||||
|
||||
@ -1908,18 +1910,20 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
|
||||
uintptr_t retaddr, MemOp op, bool code_read,
|
||||
FullLoadHelper *full_load)
|
||||
{
|
||||
uintptr_t mmu_idx = get_mmuidx(oi);
|
||||
uintptr_t index = tlb_index(env, mmu_idx, addr);
|
||||
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
|
||||
target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
|
||||
const size_t tlb_off = code_read ?
|
||||
offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
|
||||
const MMUAccessType access_type =
|
||||
code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
|
||||
unsigned a_bits = get_alignment_bits(get_memop(oi));
|
||||
const unsigned a_bits = get_alignment_bits(get_memop(oi));
|
||||
const size_t size = memop_size(op);
|
||||
uintptr_t mmu_idx = get_mmuidx(oi);
|
||||
uintptr_t index;
|
||||
CPUTLBEntry *entry;
|
||||
target_ulong tlb_addr;
|
||||
void *haddr;
|
||||
uint64_t res;
|
||||
size_t size = memop_size(op);
|
||||
|
||||
tcg_debug_assert(mmu_idx < NB_MMU_MODES);
|
||||
|
||||
/* Handle CPU specific unaligned behaviour */
|
||||
if (addr & ((1 << a_bits) - 1)) {
|
||||
@ -1927,6 +1931,10 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
|
||||
mmu_idx, retaddr);
|
||||
}
|
||||
|
||||
index = tlb_index(env, mmu_idx, addr);
|
||||
entry = tlb_entry(env, mmu_idx, addr);
|
||||
tlb_addr = code_read ? entry->addr_code : entry->addr_read;
|
||||
|
||||
/* If the TLB entry is for a different page, reload and try again. */
|
||||
if (!tlb_hit(tlb_addr, addr)) {
|
||||
if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
|
||||
@ -2174,7 +2182,7 @@ uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
|
||||
uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
return cpu_load_helper(env, addr, oi, MO_BEUQ, helper_be_ldq_mmu);
|
||||
return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu);
|
||||
}
|
||||
|
||||
uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
|
||||
@ -2310,14 +2318,16 @@ static inline void QEMU_ALWAYS_INLINE
|
||||
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr, MemOp op)
|
||||
{
|
||||
uintptr_t mmu_idx = get_mmuidx(oi);
|
||||
uintptr_t index = tlb_index(env, mmu_idx, addr);
|
||||
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
|
||||
target_ulong tlb_addr = tlb_addr_write(entry);
|
||||
const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
|
||||
unsigned a_bits = get_alignment_bits(get_memop(oi));
|
||||
const unsigned a_bits = get_alignment_bits(get_memop(oi));
|
||||
const size_t size = memop_size(op);
|
||||
uintptr_t mmu_idx = get_mmuidx(oi);
|
||||
uintptr_t index;
|
||||
CPUTLBEntry *entry;
|
||||
target_ulong tlb_addr;
|
||||
void *haddr;
|
||||
size_t size = memop_size(op);
|
||||
|
||||
tcg_debug_assert(mmu_idx < NB_MMU_MODES);
|
||||
|
||||
/* Handle CPU specific unaligned behaviour */
|
||||
if (addr & ((1 << a_bits) - 1)) {
|
||||
@ -2325,6 +2335,10 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
|
||||
mmu_idx, retaddr);
|
||||
}
|
||||
|
||||
index = tlb_index(env, mmu_idx, addr);
|
||||
entry = tlb_entry(env, mmu_idx, addr);
|
||||
tlb_addr = tlb_addr_write(entry);
|
||||
|
||||
/* If the TLB entry is for a different page, reload and try again. */
|
||||
if (!tlb_hit(tlb_addr, addr)) {
|
||||
if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
|
||||
@ -2552,7 +2566,6 @@ void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
|
||||
glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
|
||||
|
||||
#define ATOMIC_MMU_CLEANUP
|
||||
#define ATOMIC_MMU_IDX get_mmuidx(oi)
|
||||
|
||||
#include "atomic_common.c.inc"
|
||||
|
||||
|
@ -4,7 +4,6 @@
|
||||
#include "qapi/qapi-commands-machine.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "monitor/monitor.h"
|
||||
#include "sysemu/tcg.h"
|
||||
|
||||
static void hmp_tcg_register(void)
|
||||
{
|
||||
|
@ -14,8 +14,7 @@
|
||||
TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
|
||||
target_ulong cs_base, uint32_t flags,
|
||||
int cflags);
|
||||
|
||||
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
|
||||
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
|
||||
void page_init(void);
|
||||
void tb_htable_init(void);
|
||||
|
||||
|
@ -24,9 +24,8 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "sysemu/replay.h"
|
||||
#include "sysemu/cpu-timers.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "qemu/guest-random.h"
|
||||
#include "exec/exec-all.h"
|
||||
|
@ -24,9 +24,9 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "sysemu/replay.h"
|
||||
#include "sysemu/cpu-timers.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "qemu/notify.h"
|
||||
#include "qemu/guest-random.h"
|
||||
@ -142,7 +142,7 @@ void mttcg_start_vcpu_thread(CPUState *cpu)
|
||||
g_assert(tcg_enabled());
|
||||
tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1);
|
||||
|
||||
cpu->thread = g_malloc0(sizeof(QemuThread));
|
||||
cpu->thread = g_new0(QemuThread, 1);
|
||||
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
|
||||
qemu_cond_init(cpu->halt_cond);
|
||||
|
||||
|
@ -24,9 +24,9 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "sysemu/replay.h"
|
||||
#include "sysemu/cpu-timers.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "qemu/notify.h"
|
||||
#include "qemu/guest-random.h"
|
||||
@ -279,8 +279,8 @@ void rr_start_vcpu_thread(CPUState *cpu)
|
||||
tcg_cpu_init_cflags(cpu, false);
|
||||
|
||||
if (!single_tcg_cpu_thread) {
|
||||
cpu->thread = g_malloc0(sizeof(QemuThread));
|
||||
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
|
||||
cpu->thread = g_new0(QemuThread, 1);
|
||||
cpu->halt_cond = g_new0(QemuCond, 1);
|
||||
qemu_cond_init(cpu->halt_cond);
|
||||
|
||||
/* share a single thread for all cpus with TCG */
|
||||
|
@ -26,9 +26,9 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "sysemu/replay.h"
|
||||
#include "sysemu/cpu-timers.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "qemu/guest-random.h"
|
||||
#include "exec/exec-all.h"
|
||||
|
@ -24,7 +24,6 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "sysemu/cpu-timers.h"
|
||||
#include "tcg/tcg.h"
|
||||
|
@ -18,7 +18,6 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
|
||||
#define NO_CPU_IO_DEFS
|
||||
#include "trace.h"
|
||||
@ -2096,7 +2095,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
#ifdef DEBUG_DISAS
|
||||
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
|
||||
qemu_log_in_addr_range(tb->pc)) {
|
||||
FILE *logfile = qemu_log_lock();
|
||||
FILE *logfile = qemu_log_trylock();
|
||||
if (logfile) {
|
||||
int code_size, data_size;
|
||||
const tcg_target_ulong *rx_data_gen_ptr;
|
||||
size_t chunk_start;
|
||||
@ -2113,11 +2113,12 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
}
|
||||
|
||||
/* Dump header and the first instruction */
|
||||
qemu_log("OUT: [size=%d]\n", gen_code_size);
|
||||
qemu_log(" -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
|
||||
fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
|
||||
fprintf(logfile,
|
||||
" -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
|
||||
tcg_ctx->gen_insn_data[insn][0]);
|
||||
chunk_start = tcg_ctx->gen_insn_end_off[insn];
|
||||
log_disas(tb->tc.ptr, chunk_start);
|
||||
disas(logfile, tb->tc.ptr, chunk_start);
|
||||
|
||||
/*
|
||||
* Dump each instruction chunk, wrapping up empty chunks into
|
||||
@ -2127,39 +2128,43 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
while (insn < tb->icount) {
|
||||
size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
|
||||
if (chunk_end > chunk_start) {
|
||||
qemu_log(" -- guest addr 0x" TARGET_FMT_lx "\n",
|
||||
fprintf(logfile, " -- guest addr 0x" TARGET_FMT_lx "\n",
|
||||
tcg_ctx->gen_insn_data[insn][0]);
|
||||
log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start);
|
||||
disas(logfile, tb->tc.ptr + chunk_start,
|
||||
chunk_end - chunk_start);
|
||||
chunk_start = chunk_end;
|
||||
}
|
||||
insn++;
|
||||
}
|
||||
|
||||
if (chunk_start < code_size) {
|
||||
qemu_log(" -- tb slow paths + alignment\n");
|
||||
log_disas(tb->tc.ptr + chunk_start, code_size - chunk_start);
|
||||
fprintf(logfile, " -- tb slow paths + alignment\n");
|
||||
disas(logfile, tb->tc.ptr + chunk_start,
|
||||
code_size - chunk_start);
|
||||
}
|
||||
|
||||
/* Finally dump any data we may have after the block */
|
||||
if (data_size) {
|
||||
int i;
|
||||
qemu_log(" data: [size=%d]\n", data_size);
|
||||
fprintf(logfile, " data: [size=%d]\n", data_size);
|
||||
for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
|
||||
if (sizeof(tcg_target_ulong) == 8) {
|
||||
qemu_log("0x%08" PRIxPTR ": .quad 0x%016" TCG_PRIlx "\n",
|
||||
fprintf(logfile,
|
||||
"0x%08" PRIxPTR ": .quad 0x%016" TCG_PRIlx "\n",
|
||||
(uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
|
||||
} else if (sizeof(tcg_target_ulong) == 4) {
|
||||
qemu_log("0x%08" PRIxPTR ": .long 0x%08" TCG_PRIlx "\n",
|
||||
fprintf(logfile,
|
||||
"0x%08" PRIxPTR ": .long 0x%08" TCG_PRIlx "\n",
|
||||
(uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
|
||||
} else {
|
||||
qemu_build_not_reached();
|
||||
}
|
||||
}
|
||||
}
|
||||
qemu_log("\n");
|
||||
qemu_log_flush();
|
||||
fprintf(logfile, "\n");
|
||||
qemu_log_unlock(logfile);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
|
||||
|
@ -185,12 +185,14 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
|
||||
#ifdef DEBUG_DISAS
|
||||
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
|
||||
&& qemu_log_in_addr_range(db->pc_first)) {
|
||||
FILE *logfile = qemu_log_lock();
|
||||
qemu_log("----------------\n");
|
||||
ops->disas_log(db, cpu);
|
||||
qemu_log("\n");
|
||||
FILE *logfile = qemu_log_trylock();
|
||||
if (logfile) {
|
||||
fprintf(logfile, "----------------\n");
|
||||
ops->disas_log(db, cpu, logfile);
|
||||
fprintf(logfile, "\n");
|
||||
qemu_log_unlock(logfile);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -506,7 +506,6 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
|
||||
#define ATOMIC_NAME(X) \
|
||||
glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
|
||||
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
|
||||
#define ATOMIC_MMU_IDX MMU_USER_IDX
|
||||
|
||||
#define DATA_SIZE 1
|
||||
#include "atomic_template.h"
|
||||
|
@ -72,7 +72,7 @@ struct alsa_params_obt {
|
||||
snd_pcm_uframes_t samples;
|
||||
};
|
||||
|
||||
static void GCC_FMT_ATTR (2, 3) alsa_logerr (int err, const char *fmt, ...)
|
||||
static void G_GNUC_PRINTF (2, 3) alsa_logerr (int err, const char *fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
|
||||
@ -83,7 +83,7 @@ static void GCC_FMT_ATTR (2, 3) alsa_logerr (int err, const char *fmt, ...)
|
||||
AUD_log (AUDIO_CAP, "Reason: %s\n", snd_strerror (err));
|
||||
}
|
||||
|
||||
static void GCC_FMT_ATTR (3, 4) alsa_logerr2 (
|
||||
static void G_GNUC_PRINTF (3, 4) alsa_logerr2 (
|
||||
int err,
|
||||
const char *typ,
|
||||
const char *fmt,
|
||||
@ -916,6 +916,7 @@ static struct audio_pcm_ops alsa_pcm_ops = {
|
||||
.init_out = alsa_init_out,
|
||||
.fini_out = alsa_fini_out,
|
||||
.write = alsa_write,
|
||||
.buffer_get_free = audio_generic_buffer_get_free,
|
||||
.run_buffer_out = audio_generic_run_buffer_out,
|
||||
.enable_out = alsa_enable_out,
|
||||
|
||||
|
229 audio/audio.c
@ -32,7 +32,7 @@
|
||||
#include "qapi/qapi-visit-audio.h"
|
||||
#include "qemu/cutils.h"
|
||||
#include "qemu/module.h"
|
||||
#include "qemu-common.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
#include "sysemu/replay.h"
|
||||
#include "sysemu/runstate.h"
|
||||
#include "ui/qemu-spice.h"
|
||||
@ -117,7 +117,6 @@ int audio_bug (const char *funcname, int cond)
|
||||
AUD_log (NULL, "I am sorry\n");
|
||||
}
|
||||
AUD_log (NULL, "Context:\n");
|
||||
abort();
|
||||
}
|
||||
|
||||
return cond;
|
||||
@ -138,7 +137,7 @@ static inline int audio_bits_to_index (int bits)
|
||||
default:
|
||||
audio_bug ("bits_to_index", 1);
|
||||
AUD_log (NULL, "invalid bits %d\n", bits);
|
||||
return 0;
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
@ -156,7 +155,7 @@ void *audio_calloc (const char *funcname, int nmemb, size_t size)
|
||||
AUD_log (NULL, "%s passed invalid arguments to audio_calloc\n",
|
||||
funcname);
|
||||
AUD_log (NULL, "nmemb=%d size=%zu (len=%zu)\n", nmemb, size, len);
|
||||
return NULL;
|
||||
abort();
|
||||
}
|
||||
|
||||
return g_malloc0 (len);
|
||||
@ -543,70 +542,50 @@ static size_t audio_pcm_hw_get_live_in(HWVoiceIn *hw)
|
||||
size_t live = hw->total_samples_captured - audio_pcm_hw_find_min_in (hw);
|
||||
if (audio_bug(__func__, live > hw->conv_buf->size)) {
|
||||
dolog("live=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size);
|
||||
return 0;
|
||||
abort();
|
||||
}
|
||||
return live;
|
||||
}
|
||||
|
||||
static void audio_pcm_hw_clip_out(HWVoiceOut *hw, void *pcm_buf, size_t len)
|
||||
static size_t audio_pcm_hw_conv_in(HWVoiceIn *hw, void *pcm_buf, size_t samples)
|
||||
{
|
||||
size_t clipped = 0;
|
||||
size_t pos = hw->mix_buf->pos;
|
||||
size_t conv = 0;
|
||||
STSampleBuffer *conv_buf = hw->conv_buf;
|
||||
|
||||
while (len) {
|
||||
st_sample *src = hw->mix_buf->samples + pos;
|
||||
uint8_t *dst = advance(pcm_buf, clipped * hw->info.bytes_per_frame);
|
||||
size_t samples_till_end_of_buf = hw->mix_buf->size - pos;
|
||||
size_t samples_to_clip = MIN(len, samples_till_end_of_buf);
|
||||
while (samples) {
|
||||
uint8_t *src = advance(pcm_buf, conv * hw->info.bytes_per_frame);
|
||||
size_t proc = MIN(samples, conv_buf->size - conv_buf->pos);
|
||||
|
||||
hw->clip(dst, src, samples_to_clip);
|
||||
|
||||
pos = (pos + samples_to_clip) % hw->mix_buf->size;
|
||||
len -= samples_to_clip;
|
||||
clipped += samples_to_clip;
|
||||
hw->conv(conv_buf->samples + conv_buf->pos, src, proc);
|
||||
conv_buf->pos = (conv_buf->pos + proc) % conv_buf->size;
|
||||
samples -= proc;
|
||||
conv += proc;
|
||||
}
|
||||
|
||||
return conv;
|
||||
}
|
||||
|
||||
/*
|
||||
* Soft voice (capture)
|
||||
*/
|
||||
static size_t audio_pcm_sw_get_rpos_in(SWVoiceIn *sw)
|
||||
{
|
||||
HWVoiceIn *hw = sw->hw;
|
||||
ssize_t live = hw->total_samples_captured - sw->total_hw_samples_acquired;
|
||||
ssize_t rpos;
|
||||
|
||||
if (audio_bug(__func__, live < 0 || live > hw->conv_buf->size)) {
|
||||
dolog("live=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size);
|
||||
return 0;
|
||||
}
|
||||
|
||||
rpos = hw->conv_buf->pos - live;
|
||||
if (rpos >= 0) {
|
||||
return rpos;
|
||||
} else {
|
||||
return hw->conv_buf->size + rpos;
|
||||
}
|
||||
}
|
||||
|
||||
static size_t audio_pcm_sw_read(SWVoiceIn *sw, void *buf, size_t size)
|
||||
{
|
||||
HWVoiceIn *hw = sw->hw;
|
||||
size_t samples, live, ret = 0, swlim, isamp, osamp, rpos, total = 0;
|
||||
struct st_sample *src, *dst = sw->buf;
|
||||
|
||||
rpos = audio_pcm_sw_get_rpos_in(sw) % hw->conv_buf->size;
|
||||
|
||||
live = hw->total_samples_captured - sw->total_hw_samples_acquired;
|
||||
if (audio_bug(__func__, live > hw->conv_buf->size)) {
|
||||
dolog("live_in=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size);
|
||||
return 0;
|
||||
}
|
||||
|
||||
samples = size / sw->info.bytes_per_frame;
|
||||
if (!live) {
|
||||
return 0;
|
||||
}
|
||||
if (audio_bug(__func__, live > hw->conv_buf->size)) {
|
||||
dolog("live_in=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size);
|
||||
abort();
|
||||
}
|
||||
|
||||
rpos = audio_ring_posb(hw->conv_buf->pos, live, hw->conv_buf->size);
|
||||
|
||||
samples = size / sw->info.bytes_per_frame;
|
||||
|
||||
swlim = (live * sw->ratio) >> 32;
|
||||
swlim = MIN (swlim, samples);
|
||||
@ -632,7 +611,7 @@ static size_t audio_pcm_sw_read(SWVoiceIn *sw, void *buf, size_t size)
|
||||
total += isamp;
|
||||
}
|
||||
|
||||
if (hw->pcm_ops && !hw->pcm_ops->volume_in) {
|
||||
if (!hw->pcm_ops->volume_in) {
|
||||
mixeng_volume (sw->buf, ret, &sw->vol);
|
||||
}
|
||||
|
||||
@ -676,19 +655,45 @@ static size_t audio_pcm_hw_get_live_out (HWVoiceOut *hw, int *nb_live)
|
||||
|
||||
if (audio_bug(__func__, live > hw->mix_buf->size)) {
|
||||
dolog("live=%zu hw->mix_buf->size=%zu\n", live, hw->mix_buf->size);
|
||||
return 0;
|
||||
abort();
|
||||
}
|
||||
return live;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static size_t audio_pcm_hw_get_free(HWVoiceOut *hw)
|
||||
{
|
||||
return (hw->pcm_ops->buffer_get_free ? hw->pcm_ops->buffer_get_free(hw) :
|
||||
INT_MAX) / hw->info.bytes_per_frame;
|
||||
}
|
||||
|
||||
static void audio_pcm_hw_clip_out(HWVoiceOut *hw, void *pcm_buf, size_t len)
|
||||
{
|
||||
size_t clipped = 0;
|
||||
size_t pos = hw->mix_buf->pos;
|
||||
|
||||
while (len) {
|
||||
st_sample *src = hw->mix_buf->samples + pos;
|
||||
uint8_t *dst = advance(pcm_buf, clipped * hw->info.bytes_per_frame);
|
||||
size_t samples_till_end_of_buf = hw->mix_buf->size - pos;
|
||||
size_t samples_to_clip = MIN(len, samples_till_end_of_buf);
|
||||
|
||||
hw->clip(dst, src, samples_to_clip);
|
||||
|
||||
pos = (pos + samples_to_clip) % hw->mix_buf->size;
|
||||
len -= samples_to_clip;
|
||||
clipped += samples_to_clip;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Soft voice (playback)
|
||||
*/
|
||||
static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t size)
|
||||
{
|
||||
size_t hwsamples, samples, isamp, osamp, wpos, live, dead, left, swlim, blck;
|
||||
size_t hwsamples, samples, isamp, osamp, wpos, live, dead, left, blck;
|
||||
size_t hw_free;
|
||||
size_t ret = 0, pos = 0, total = 0;
|
||||
|
||||
if (!sw) {
|
||||
@ -700,7 +705,7 @@ static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t size)
|
||||
live = sw->total_hw_samples_mixed;
|
||||
if (audio_bug(__func__, live > hwsamples)) {
|
||||
dolog("live=%zu hw->mix_buf->size=%zu\n", live, hwsamples);
|
||||
return 0;
|
||||
abort();
|
||||
}
|
||||
|
||||
if (live == hwsamples) {
|
||||
@ -711,27 +716,28 @@ static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t size)
|
||||
}
|
||||
|
||||
wpos = (sw->hw->mix_buf->pos + live) % hwsamples;
|
||||
samples = size / sw->info.bytes_per_frame;
|
||||
|
||||
dead = hwsamples - live;
|
||||
swlim = ((int64_t) dead << 32) / sw->ratio;
|
||||
swlim = MIN (swlim, samples);
|
||||
if (swlim) {
|
||||
sw->conv (sw->buf, buf, swlim);
|
||||
hw_free = audio_pcm_hw_get_free(sw->hw);
|
||||
hw_free = hw_free > live ? hw_free - live : 0;
|
||||
samples = ((int64_t)MIN(dead, hw_free) << 32) / sw->ratio;
|
||||
samples = MIN(samples, size / sw->info.bytes_per_frame);
|
||||
if (samples) {
|
||||
sw->conv(sw->buf, buf, samples);
|
||||
|
||||
if (sw->hw->pcm_ops && !sw->hw->pcm_ops->volume_out) {
|
||||
mixeng_volume (sw->buf, swlim, &sw->vol);
|
||||
if (!sw->hw->pcm_ops->volume_out) {
|
||||
mixeng_volume(sw->buf, samples, &sw->vol);
|
||||
}
|
||||
}
|
||||
|
||||
while (swlim) {
|
||||
while (samples) {
|
||||
dead = hwsamples - live;
|
||||
left = hwsamples - wpos;
|
||||
blck = MIN (dead, left);
|
||||
if (!blck) {
|
||||
break;
|
||||
}
|
||||
isamp = swlim;
|
||||
isamp = samples;
|
||||
osamp = blck;
|
||||
st_rate_flow_mix (
|
||||
sw->rate,
|
||||
@ -741,7 +747,7 @@ static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t size)
|
||||
&osamp
|
||||
);
|
||||
ret += isamp;
|
||||
swlim -= isamp;
|
||||
samples -= isamp;
|
||||
pos += isamp;
|
||||
live += osamp;
|
||||
wpos = (wpos + osamp) % hwsamples;
|
||||
@ -991,7 +997,7 @@ static size_t audio_get_avail (SWVoiceIn *sw)
|
||||
if (audio_bug(__func__, live > sw->hw->conv_buf->size)) {
|
||||
dolog("live=%zu sw->hw->conv_buf->size=%zu\n", live,
|
||||
sw->hw->conv_buf->size);
|
||||
return 0;
|
||||
abort();
|
||||
}
|
||||
|
||||
ldebug (
|
||||
@ -1003,6 +1009,11 @@ static size_t audio_get_avail (SWVoiceIn *sw)
|
||||
return (((int64_t) live << 32) / sw->ratio) * sw->info.bytes_per_frame;
|
||||
}
|
||||
|
||||
static size_t audio_sw_bytes_free(SWVoiceOut *sw, size_t free)
{
    return (((int64_t)free << 32) / sw->ratio) * sw->info.bytes_per_frame;
}
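audio_sw_bytes_free() turns free space counted in hardware frames into
guest-side bytes via sw->ratio. Judging by the "<< 32" arithmetic here and in
audio_get_avail(), sw->ratio looks like a 32.32 fixed-point frequency ratio;
the concrete formula below is an assumption for illustration, not something
stated in this diff:

    /* assumed: sw->ratio = ((int64_t)hw_freq << 32) / guest_freq */
    int64_t ratio = ((int64_t)44100 << 32) / 22050;                 /* 2 << 32 */
    size_t hw_frames_free = 512;
    size_t guest_frames = ((int64_t)hw_frames_free << 32) / ratio;  /* 256 */
    size_t guest_bytes = guest_frames * 4;        /* stereo S16: 1024 bytes */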
|
||||
|
||||
static size_t audio_get_free(SWVoiceOut *sw)
|
||||
{
|
||||
size_t live, dead;
|
||||
@ -1016,19 +1027,17 @@ static size_t audio_get_free(SWVoiceOut *sw)
|
||||
if (audio_bug(__func__, live > sw->hw->mix_buf->size)) {
|
||||
dolog("live=%zu sw->hw->mix_buf->size=%zu\n", live,
|
||||
sw->hw->mix_buf->size);
|
||||
return 0;
|
||||
abort();
|
||||
}
|
||||
|
||||
dead = sw->hw->mix_buf->size - live;
|
||||
|
||||
#ifdef DEBUG_OUT
|
||||
dolog ("%s: get_free live %zu dead %zu ret %" PRId64 "\n",
|
||||
SW_NAME (sw),
|
||||
live, dead, (((int64_t) dead << 32) / sw->ratio) *
|
||||
sw->info.bytes_per_frame);
|
||||
dolog("%s: get_free live %zu dead %zu sw_bytes %zu\n",
|
||||
SW_NAME(sw), live, dead, audio_sw_bytes_free(sw, dead));
|
||||
#endif
|
||||
|
||||
return (((int64_t) dead << 32) / sw->ratio) * sw->info.bytes_per_frame;
|
||||
return dead;
|
||||
}
|
||||
|
||||
static void audio_capture_mix_and_clear(HWVoiceOut *hw, size_t rpos,
|
||||
@ -1132,9 +1141,27 @@ static void audio_run_out (AudioState *s)
|
||||
}
|
||||
|
||||
while ((hw = audio_pcm_hw_find_any_enabled_out(s, hw))) {
|
||||
size_t played, live, prev_rpos, free;
|
||||
size_t played, live, prev_rpos;
|
||||
size_t hw_free = audio_pcm_hw_get_free(hw);
|
||||
int nb_live;
|
||||
|
||||
for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) {
|
||||
if (sw->active) {
|
||||
size_t sw_free = audio_get_free(sw);
|
||||
size_t free;
|
||||
|
||||
if (hw_free > sw->total_hw_samples_mixed) {
|
||||
free = audio_sw_bytes_free(sw,
|
||||
MIN(sw_free, hw_free - sw->total_hw_samples_mixed));
|
||||
} else {
|
||||
free = 0;
|
||||
}
|
||||
if (free > 0) {
|
||||
sw->callback.fn(sw->callback.opaque, free);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
live = audio_pcm_hw_get_live_out (hw, &nb_live);
|
||||
if (!nb_live) {
|
||||
live = 0;
|
||||
@ -1142,7 +1169,7 @@ static void audio_run_out (AudioState *s)
|
||||
|
||||
if (audio_bug(__func__, live > hw->mix_buf->size)) {
|
||||
dolog("live=%zu hw->mix_buf->size=%zu\n", live, hw->mix_buf->size);
|
||||
continue;
|
||||
abort();
|
||||
}
|
||||
|
||||
if (hw->pending_disable && !nb_live) {
|
||||
@ -1163,14 +1190,6 @@ static void audio_run_out (AudioState *s)
|
||||
}
|
||||
|
||||
if (!live) {
|
||||
for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) {
|
||||
if (sw->active) {
|
||||
free = audio_get_free (sw);
|
||||
if (free > 0) {
|
||||
sw->callback.fn (sw->callback.opaque, free);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (hw->pcm_ops->run_buffer_out) {
|
||||
hw->pcm_ops->run_buffer_out(hw);
|
||||
}
|
||||
@ -1183,7 +1202,7 @@ static void audio_run_out (AudioState *s)
|
||||
if (audio_bug(__func__, hw->mix_buf->pos >= hw->mix_buf->size)) {
|
||||
dolog("hw->mix_buf->pos=%zu hw->mix_buf->size=%zu played=%zu\n",
|
||||
hw->mix_buf->pos, hw->mix_buf->size, played);
|
||||
hw->mix_buf->pos = 0;
|
||||
abort();
|
||||
}
|
||||
|
||||
#ifdef DEBUG_OUT
|
||||
@ -1203,7 +1222,7 @@ static void audio_run_out (AudioState *s)
|
||||
if (audio_bug(__func__, played > sw->total_hw_samples_mixed)) {
|
||||
dolog("played=%zu sw->total_hw_samples_mixed=%zu\n",
|
||||
played, sw->total_hw_samples_mixed);
|
||||
played = sw->total_hw_samples_mixed;
|
||||
abort();
|
||||
}
|
||||
|
||||
sw->total_hw_samples_mixed -= played;
|
||||
@ -1211,13 +1230,6 @@ static void audio_run_out (AudioState *s)
|
||||
if (!sw->total_hw_samples_mixed) {
|
||||
sw->empty = 1;
|
||||
}
|
||||
|
||||
if (sw->active) {
|
||||
free = audio_get_free (sw);
|
||||
if (free > 0) {
|
||||
sw->callback.fn (sw->callback.opaque, free);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1225,7 +1237,6 @@ static void audio_run_out (AudioState *s)
|
||||
static size_t audio_pcm_hw_run_in(HWVoiceIn *hw, size_t samples)
|
||||
{
|
||||
size_t conv = 0;
|
||||
STSampleBuffer *conv_buf = hw->conv_buf;
|
||||
|
||||
if (hw->pcm_ops->run_buffer_in) {
|
||||
hw->pcm_ops->run_buffer_in(hw);
|
||||
@ -1241,11 +1252,7 @@ static size_t audio_pcm_hw_run_in(HWVoiceIn *hw, size_t samples)
|
||||
break;
|
||||
}
|
||||
|
||||
proc = MIN(size / hw->info.bytes_per_frame,
|
||||
conv_buf->size - conv_buf->pos);
|
||||
|
||||
hw->conv(conv_buf->samples + conv_buf->pos, buf, proc);
|
||||
conv_buf->pos = (conv_buf->pos + proc) % conv_buf->size;
|
||||
proc = audio_pcm_hw_conv_in(hw, buf, size / hw->info.bytes_per_frame);
|
||||
|
||||
samples -= proc;
|
||||
conv += proc;
|
||||
@ -1338,7 +1345,7 @@ static void audio_run_capture (AudioState *s)
|
||||
if (audio_bug(__func__, captured > sw->total_hw_samples_mixed)) {
|
||||
dolog("captured=%zu sw->total_hw_samples_mixed=%zu\n",
|
||||
captured, sw->total_hw_samples_mixed);
|
||||
captured = sw->total_hw_samples_mixed;
|
||||
abort();
|
||||
}
|
||||
|
||||
sw->total_hw_samples_mixed -= captured;
|
||||
@ -1394,12 +1401,10 @@ void audio_generic_run_buffer_in(HWVoiceIn *hw)
|
||||
|
||||
void *audio_generic_get_buffer_in(HWVoiceIn *hw, size_t *size)
|
||||
{
|
||||
ssize_t start = (ssize_t)hw->pos_emul - hw->pending_emul;
|
||||
size_t start;
|
||||
|
||||
if (start < 0) {
|
||||
start += hw->size_emul;
|
||||
}
|
||||
assert(start >= 0 && start < hw->size_emul);
|
||||
start = audio_ring_posb(hw->pos_emul, hw->pending_emul, hw->size_emul);
|
||||
assert(start < hw->size_emul);
|
||||
|
||||
*size = MIN(*size, hw->pending_emul);
|
||||
*size = MIN(*size, hw->size_emul - start);
|
||||
@ -1412,16 +1417,22 @@ void audio_generic_put_buffer_in(HWVoiceIn *hw, void *buf, size_t size)
|
||||
hw->pending_emul -= size;
|
||||
}
|
||||
|
||||
size_t audio_generic_buffer_get_free(HWVoiceOut *hw)
{
    if (hw->buf_emul) {
        return hw->size_emul - hw->pending_emul;
    } else {
        return hw->samples * hw->info.bytes_per_frame;
    }
}
|
||||
|
||||
void audio_generic_run_buffer_out(HWVoiceOut *hw)
|
||||
{
|
||||
while (hw->pending_emul) {
|
||||
size_t write_len, written;
|
||||
ssize_t start = ((ssize_t) hw->pos_emul) - hw->pending_emul;
|
||||
size_t write_len, written, start;
|
||||
|
||||
if (start < 0) {
|
||||
start += hw->size_emul;
|
||||
}
|
||||
assert(start >= 0 && start < hw->size_emul);
|
||||
start = audio_ring_posb(hw->pos_emul, hw->pending_emul, hw->size_emul);
|
||||
assert(start < hw->size_emul);
|
||||
|
||||
write_len = MIN(hw->pending_emul, hw->size_emul - start);
|
||||
|
||||
@ -1462,6 +1473,12 @@ size_t audio_generic_write(HWVoiceOut *hw, void *buf, size_t size)
|
||||
{
|
||||
size_t total = 0;
|
||||
|
||||
if (hw->pcm_ops->buffer_get_free) {
|
||||
size_t free = hw->pcm_ops->buffer_get_free(hw);
|
||||
|
||||
size = MIN(size, free);
|
||||
}
|
||||
|
||||
while (total < size) {
|
||||
size_t dst_size = size - total;
|
||||
size_t copy_size, proc;
|
||||
@ -1716,7 +1733,7 @@ static AudioState *audio_init(Audiodev *dev, const char *name)
|
||||
audio_validate_opts(dev, &error_abort);
|
||||
}
|
||||
|
||||
s = g_malloc0(sizeof(AudioState));
|
||||
s = g_new0(AudioState, 1);
|
||||
s->dev = dev;
|
||||
|
||||
QLIST_INIT (&s->hw_head_out);
|
||||
@ -1821,6 +1838,7 @@ void AUD_remove_card (QEMUSoundCard *card)
|
||||
g_free (card->name);
|
||||
}
|
||||
|
||||
static struct audio_pcm_ops capture_pcm_ops;
|
||||
|
||||
CaptureVoiceOut *AUD_add_capture(
|
||||
AudioState *s,
|
||||
@ -1866,6 +1884,7 @@ CaptureVoiceOut *AUD_add_capture(
|
||||
|
||||
hw = &cap->hw;
|
||||
hw->s = s;
|
||||
hw->pcm_ops = &capture_pcm_ops;
|
||||
QLIST_INIT (&hw->sw_head);
|
||||
QLIST_INIT (&cap->cb_head);
|
||||
|
||||
@ -2089,7 +2108,7 @@ void audio_parse_option(const char *opt)
|
||||
|
||||
audio_validate_opts(dev, &error_fatal);
|
||||
|
||||
e = g_malloc0(sizeof(AudiodevListEntry));
|
||||
e = g_new0(AudiodevListEntry, 1);
|
||||
e->dev = dev;
|
||||
QSIMPLEQ_INSERT_TAIL(&audiodevs, e, next);
|
||||
}
|
||||
|
@ -32,7 +32,7 @@
|
||||
|
||||
typedef void (*audio_callback_fn) (void *opaque, int avail);

#ifdef HOST_WORDS_BIGENDIAN
#if HOST_BIG_ENDIAN
#define AUDIO_HOST_ENDIANNESS 1
#else
#define AUDIO_HOST_ENDIANNESS 0
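The check changes from "#ifdef HOST_WORDS_BIGENDIAN" to "#if HOST_BIG_ENDIAN",
which presumably means HOST_BIG_ENDIAN is now always defined to 0 or 1, so a
missing or misspelled macro can be caught (for example by -Wundef) instead of
silently selecting the little-endian branch. A minimal sketch of the new
style, reusing the AUDIO_HOST_BE names from the dbusaudio hunk further down:

    #if HOST_BIG_ENDIAN
    #define AUDIO_HOST_BE TRUE
    #else
    #define AUDIO_HOST_BE FALSE
    #endif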
@@ -91,8 +91,8 @@ typedef struct QEMUAudioTimeStamp {
    uint64_t old_ts;
} QEMUAudioTimeStamp;

void AUD_vlog (const char *cap, const char *fmt, va_list ap) GCC_FMT_ATTR(2, 0);
void AUD_log (const char *cap, const char *fmt, ...) GCC_FMT_ATTR(2, 3);
void AUD_vlog (const char *cap, const char *fmt, va_list ap) G_GNUC_PRINTF(2, 0);
void AUD_log (const char *cap, const char *fmt, ...) G_GNUC_PRINTF(2, 3);
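GCC_FMT_ATTR gives way to GLib's G_GNUC_PRINTF, which expands (roughly) to
__attribute__((format(printf, fmt_idx, args_idx))) on compilers that support
it, so the compiler can type-check format strings against their variadic
arguments. A minimal usage sketch with a hypothetical helper name:

    #include <glib.h>

    /* argument 2 is the format string, variadic arguments start at 3 */
    void cap_log(const char *cap, const char *fmt, ...) G_GNUC_PRINTF(2, 3);

    /* cap_log("audio", "%s", 42) would now trigger a -Wformat warning */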
|
||||
void AUD_register_card (const char *name, QEMUSoundCard *card);
|
||||
void AUD_remove_card (QEMUSoundCard *card);
|
||||
|
@ -161,10 +161,14 @@ struct audio_pcm_ops {
|
||||
void (*fini_out)(HWVoiceOut *hw);
|
||||
size_t (*write) (HWVoiceOut *hw, void *buf, size_t size);
|
||||
void (*run_buffer_out)(HWVoiceOut *hw);
|
||||
/*
|
||||
* Get the free output buffer size. This is an upper limit. The size
|
||||
* returned by function get_buffer_out may be smaller.
|
||||
*/
|
||||
size_t (*buffer_get_free)(HWVoiceOut *hw);
|
||||
/*
|
||||
* get a buffer that after later can be passed to put_buffer_out; optional
|
||||
* returns the buffer, and writes it's size to size (in bytes)
|
||||
* this is unrelated to the above buffer_size_out function
|
||||
*/
|
||||
void *(*get_buffer_out)(HWVoiceOut *hw, size_t *size);
|
||||
/*
|
||||
@ -190,6 +194,7 @@ void audio_generic_run_buffer_in(HWVoiceIn *hw);
|
||||
void *audio_generic_get_buffer_in(HWVoiceIn *hw, size_t *size);
|
||||
void audio_generic_put_buffer_in(HWVoiceIn *hw, void *buf, size_t size);
|
||||
void audio_generic_run_buffer_out(HWVoiceOut *hw);
|
||||
size_t audio_generic_buffer_get_free(HWVoiceOut *hw);
|
||||
void *audio_generic_get_buffer_out(HWVoiceOut *hw, size_t *size);
|
||||
size_t audio_generic_put_buffer_out(HWVoiceOut *hw, void *buf, size_t size);
|
||||
size_t audio_generic_write(HWVoiceOut *hw, void *buf, size_t size);
|
||||
@ -266,6 +271,19 @@ static inline size_t audio_ring_dist(size_t dst, size_t src, size_t len)
|
||||
return (dst >= src) ? (dst - src) : (len - src + dst);
|
||||
}
|
||||
|
||||
/**
 * audio_ring_posb() - returns new position in ringbuffer in backward
 * direction at given distance
 *
 * @pos: current position in ringbuffer
 * @dist: distance in ringbuffer to walk in reverse direction
 * @len: size of ringbuffer
 */
static inline size_t audio_ring_posb(size_t pos, size_t dist, size_t len)
{
    return pos >= dist ? pos - dist : len - dist + pos;
}

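Several hunks in this series replace the signed wrap-around computation of the
read start position with this helper. A minimal before/after sketch, using the
HWVoiceOut emulated-buffer fields that appear in those hunks:

    /* before: signed intermediate plus manual wrap */
    ssize_t start = (ssize_t)hw->pos_emul - hw->pending_emul;
    if (start < 0) {
        start += hw->size_emul;
    }

    /* after: unsigned helper, result is already in [0, size_emul) */
    size_t start = audio_ring_posb(hw->pos_emul, hw->pending_emul,
                                   hw->size_emul);
    assert(start < hw->size_emul);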
#define dolog(fmt, ...) AUD_log(AUDIO_CAP, fmt, ## __VA_ARGS__)
|
||||
|
||||
#ifdef DEBUG
|
||||
|
@ -328,8 +328,8 @@ static void handle_per_direction(
|
||||
|
||||
static AudiodevListEntry *legacy_opt(const char *drvname)
|
||||
{
|
||||
AudiodevListEntry *e = g_malloc0(sizeof(AudiodevListEntry));
|
||||
e->dev = g_malloc0(sizeof(Audiodev));
|
||||
AudiodevListEntry *e = g_new0(AudiodevListEntry, 1);
|
||||
e->dev = g_new0(Audiodev, 1);
|
||||
e->dev->id = g_strdup(drvname);
|
||||
e->dev->driver = qapi_enum_parse(
|
||||
&AudiodevDriver_lookup, drvname, -1, &error_abort);
|
||||
@ -508,7 +508,7 @@ static void lv_free(Visitor *v)
|
||||
|
||||
static Visitor *legacy_visitor_new(void)
|
||||
{
|
||||
LegacyPrintVisitor *lv = g_malloc0(sizeof(LegacyPrintVisitor));
|
||||
LegacyPrintVisitor *lv = g_new0(LegacyPrintVisitor, 1);
|
||||
|
||||
lv->visitor.start_struct = lv_start_struct;
|
||||
lv->visitor.end_struct = lv_end_struct;
|
||||
|
@ -59,12 +59,13 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s,
|
||||
if (audio_bug(__func__, !voice_size && max_voices)) {
|
||||
dolog ("drv=`%s' voice_size=0 max_voices=%d\n",
|
||||
drv->name, max_voices);
|
||||
glue (s->nb_hw_voices_, TYPE) = 0;
|
||||
abort();
|
||||
}
|
||||
|
||||
if (audio_bug(__func__, voice_size && !max_voices)) {
|
||||
dolog ("drv=`%s' voice_size=%d max_voices=0\n",
|
||||
drv->name, voice_size);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
@ -81,6 +82,7 @@ static void glue(audio_pcm_hw_alloc_resources_, TYPE)(HW *hw)
|
||||
size_t samples = hw->samples;
|
||||
if (audio_bug(__func__, samples == 0)) {
|
||||
dolog("Attempted to allocate empty buffer\n");
|
||||
abort();
|
||||
}
|
||||
|
||||
HWBUF = g_malloc0(sizeof(STSampleBuffer) + sizeof(st_sample) * samples);
|
||||
@ -252,12 +254,12 @@ static HW *glue(audio_pcm_hw_add_new_, TYPE)(AudioState *s,
|
||||
|
||||
if (audio_bug(__func__, !drv)) {
|
||||
dolog ("No host audio driver\n");
|
||||
return NULL;
|
||||
abort();
|
||||
}
|
||||
|
||||
if (audio_bug(__func__, !drv->pcm_ops)) {
|
||||
dolog ("Host audio driver without pcm_ops\n");
|
||||
return NULL;
|
||||
abort();
|
||||
}
|
||||
|
||||
hw = audio_calloc(__func__, 1, glue(drv->voice_size_, TYPE));
|
||||
@ -275,12 +277,13 @@ static HW *glue(audio_pcm_hw_add_new_, TYPE)(AudioState *s,
|
||||
QLIST_INIT (&hw->cap_head);
|
||||
#endif
|
||||
if (glue (hw->pcm_ops->init_, TYPE) (hw, as, s->drv_opaque)) {
|
||||
goto err0;
|
||||
g_free(hw);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (audio_bug(__func__, hw->samples <= 0)) {
|
||||
dolog("hw->samples=%zd\n", hw->samples);
|
||||
goto err1;
|
||||
abort();
|
||||
}
|
||||
|
||||
if (hw->info.is_float) {
|
||||
@ -309,12 +312,6 @@ static HW *glue(audio_pcm_hw_add_new_, TYPE)(AudioState *s,
|
||||
audio_attach_capture (hw);
|
||||
#endif
|
||||
return hw;
|
||||
|
||||
err1:
|
||||
glue (hw->pcm_ops->fini_, TYPE) (hw);
|
||||
err0:
|
||||
g_free (hw);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
AudiodevPerDirectionOptions *glue(audio_get_pdo_, TYPE)(Audiodev *dev)
|
||||
@ -435,7 +432,7 @@ void glue (AUD_close_, TYPE) (QEMUSoundCard *card, SW *sw)
|
||||
if (sw) {
|
||||
if (audio_bug(__func__, !card)) {
|
||||
dolog ("card=%p\n", card);
|
||||
return;
|
||||
abort();
|
||||
}
|
||||
|
||||
glue (audio_close_, TYPE) (sw);
|
||||
@ -457,7 +454,7 @@ SW *glue (AUD_open_, TYPE) (
|
||||
if (audio_bug(__func__, !card || !name || !callback_fn || !as)) {
|
||||
dolog ("card=%p name=%p callback_fn=%p as=%p\n",
|
||||
card, name, callback_fn, as);
|
||||
goto fail;
|
||||
abort();
|
||||
}
|
||||
|
||||
s = card->state;
|
||||
@ -468,12 +465,12 @@ SW *glue (AUD_open_, TYPE) (
|
||||
|
||||
if (audio_bug(__func__, audio_validate_settings(as))) {
|
||||
audio_print_settings (as);
|
||||
goto fail;
|
||||
abort();
|
||||
}
|
||||
|
||||
if (audio_bug(__func__, !s->drv)) {
|
||||
dolog ("Can not open `%s' (no host audio driver)\n", name);
|
||||
goto fail;
|
||||
abort();
|
||||
}
|
||||
|
||||
if (sw && audio_pcm_info_eq (&sw->info, as)) {
|
||||
|
@ -1,7 +1,6 @@
|
||||
/* public domain */
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
|
||||
#define AUDIO_CAP "win-int"
|
||||
#include <windows.h>
|
||||
|
@@ -44,10 +44,15 @@ typedef struct coreaudioVoiceOut {
    bool enabled;
} coreaudioVoiceOut;

#if !defined(MAC_OS_VERSION_12_0) \
    || (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_12_0)
#define kAudioObjectPropertyElementMain kAudioObjectPropertyElementMaster
#endif

static const AudioObjectPropertyAddress voice_addr = {
    kAudioHardwarePropertyDefaultOutputDevice,
    kAudioObjectPropertyScopeGlobal,
    kAudioObjectPropertyElementMaster
    kAudioObjectPropertyElementMain
};
|
||||
|
||||
static OSStatus coreaudio_get_voice(AudioDeviceID *id)
|
||||
@ -69,7 +74,7 @@ static OSStatus coreaudio_get_framesizerange(AudioDeviceID id,
|
||||
AudioObjectPropertyAddress addr = {
|
||||
kAudioDevicePropertyBufferFrameSizeRange,
|
||||
kAudioDevicePropertyScopeOutput,
|
||||
kAudioObjectPropertyElementMaster
|
||||
kAudioObjectPropertyElementMain
|
||||
};
|
||||
|
||||
return AudioObjectGetPropertyData(id,
|
||||
@ -86,7 +91,7 @@ static OSStatus coreaudio_get_framesize(AudioDeviceID id, UInt32 *framesize)
|
||||
AudioObjectPropertyAddress addr = {
|
||||
kAudioDevicePropertyBufferFrameSize,
|
||||
kAudioDevicePropertyScopeOutput,
|
||||
kAudioObjectPropertyElementMaster
|
||||
kAudioObjectPropertyElementMain
|
||||
};
|
||||
|
||||
return AudioObjectGetPropertyData(id,
|
||||
@ -103,7 +108,7 @@ static OSStatus coreaudio_set_framesize(AudioDeviceID id, UInt32 *framesize)
|
||||
AudioObjectPropertyAddress addr = {
|
||||
kAudioDevicePropertyBufferFrameSize,
|
||||
kAudioDevicePropertyScopeOutput,
|
||||
kAudioObjectPropertyElementMaster
|
||||
kAudioObjectPropertyElementMain
|
||||
};
|
||||
|
||||
return AudioObjectSetPropertyData(id,
|
||||
@ -121,7 +126,7 @@ static OSStatus coreaudio_set_streamformat(AudioDeviceID id,
|
||||
AudioObjectPropertyAddress addr = {
|
||||
kAudioDevicePropertyStreamFormat,
|
||||
kAudioDevicePropertyScopeOutput,
|
||||
kAudioObjectPropertyElementMaster
|
||||
kAudioObjectPropertyElementMain
|
||||
};
|
||||
|
||||
return AudioObjectSetPropertyData(id,
|
||||
@ -138,7 +143,7 @@ static OSStatus coreaudio_get_isrunning(AudioDeviceID id, UInt32 *result)
|
||||
AudioObjectPropertyAddress addr = {
|
||||
kAudioDevicePropertyDeviceIsRunning,
|
||||
kAudioDevicePropertyScopeOutput,
|
||||
kAudioObjectPropertyElementMaster
|
||||
kAudioObjectPropertyElementMain
|
||||
};
|
||||
|
||||
return AudioObjectGetPropertyData(id,
|
||||
@ -206,7 +211,7 @@ static void coreaudio_logstatus (OSStatus status)
|
||||
AUD_log (AUDIO_CAP, "Reason: %s\n", str);
|
||||
}
|
||||
|
||||
static void GCC_FMT_ATTR (2, 3) coreaudio_logerr (
|
||||
static void G_GNUC_PRINTF (2, 3) coreaudio_logerr (
|
||||
OSStatus status,
|
||||
const char *fmt,
|
||||
...
|
||||
@ -221,7 +226,7 @@ static void GCC_FMT_ATTR (2, 3) coreaudio_logerr (
|
||||
coreaudio_logstatus (status);
|
||||
}
|
||||
|
||||
static void GCC_FMT_ATTR (3, 4) coreaudio_logerr2 (
|
||||
static void G_GNUC_PRINTF (3, 4) coreaudio_logerr2 (
|
||||
OSStatus status,
|
||||
const char *typ,
|
||||
const char *fmt,
|
||||
@ -283,6 +288,7 @@ static int coreaudio_buf_unlock (coreaudioVoiceOut *core, const char *fn_name)
|
||||
coreaudio_buf_unlock(core, "coreaudio_" #name); \
|
||||
return ret; \
|
||||
}
|
||||
COREAUDIO_WRAPPER_FUNC(buffer_get_free, size_t, (HWVoiceOut *hw), (hw))
|
||||
COREAUDIO_WRAPPER_FUNC(get_buffer_out, void *, (HWVoiceOut *hw, size_t *size),
|
||||
(hw, size))
|
||||
COREAUDIO_WRAPPER_FUNC(put_buffer_out, size_t,
|
||||
@ -333,12 +339,10 @@ static OSStatus audioDeviceIOProc(
|
||||
|
||||
len = frameCount * hw->info.bytes_per_frame;
|
||||
while (len) {
|
||||
size_t write_len;
|
||||
ssize_t start = ((ssize_t) hw->pos_emul) - hw->pending_emul;
|
||||
if (start < 0) {
|
||||
start += hw->size_emul;
|
||||
}
|
||||
assert(start >= 0 && start < hw->size_emul);
|
||||
size_t write_len, start;
|
||||
|
||||
start = audio_ring_posb(hw->pos_emul, hw->pending_emul, hw->size_emul);
|
||||
assert(start < hw->size_emul);
|
||||
|
||||
write_len = MIN(MIN(hw->pending_emul, len),
|
||||
hw->size_emul - start);
|
||||
@ -541,7 +545,6 @@ static OSStatus handle_voice_change(
|
||||
const AudioObjectPropertyAddress *in_addresses,
|
||||
void *in_client_data)
|
||||
{
|
||||
OSStatus status;
|
||||
coreaudioVoiceOut *core = in_client_data;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
@ -550,13 +553,12 @@ static OSStatus handle_voice_change(
|
||||
fini_out_device(core);
|
||||
}
|
||||
|
||||
status = init_out_device(core);
|
||||
if (!status) {
|
||||
if (!init_out_device(core)) {
|
||||
update_device_playback_state(core);
|
||||
}
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
return status;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as,
|
||||
@ -604,6 +606,8 @@ static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as,
|
||||
coreaudio_playback_logerr(status,
|
||||
"Could not remove voice property change listener\n");
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -654,6 +658,8 @@ static struct audio_pcm_ops coreaudio_pcm_ops = {
|
||||
.fini_out = coreaudio_fini_out,
|
||||
/* wrapper for audio_generic_write */
|
||||
.write = coreaudio_write,
|
||||
/* wrapper for audio_generic_buffer_get_free */
|
||||
.buffer_get_free = coreaudio_buffer_get_free,
|
||||
/* wrapper for audio_generic_get_buffer_out */
|
||||
.get_buffer_out = coreaudio_get_buffer_out,
|
||||
/* wrapper for audio_generic_put_buffer_out */
|
@ -122,7 +122,7 @@ static size_t dbus_put_buffer_out(HWVoiceOut *hw, void *buf, size_t size)
|
||||
return size;
|
||||
}
|
||||
|
||||
#ifdef HOST_WORDS_BIGENDIAN
|
||||
#if HOST_BIG_ENDIAN
|
||||
#define AUDIO_HOST_BE TRUE
|
||||
#else
|
||||
#define AUDIO_HOST_BE FALSE
|
||||
|
@ -222,7 +222,7 @@ static void dsound_log_hresult (HRESULT hr)
|
||||
AUD_log (AUDIO_CAP, "Reason: %s\n", str);
|
||||
}
|
||||
|
||||
static void GCC_FMT_ATTR (2, 3) dsound_logerr (
|
||||
static void G_GNUC_PRINTF (2, 3) dsound_logerr (
|
||||
HRESULT hr,
|
||||
const char *fmt,
|
||||
...
|
||||
@ -237,7 +237,7 @@ static void GCC_FMT_ATTR (2, 3) dsound_logerr (
|
||||
dsound_log_hresult (hr);
|
||||
}
|
||||
|
||||
static void GCC_FMT_ATTR (3, 4) dsound_logerr2 (
|
||||
static void G_GNUC_PRINTF (3, 4) dsound_logerr2 (
|
||||
HRESULT hr,
|
||||
const char *typ,
|
||||
const char *fmt,
|
||||
@ -427,22 +427,18 @@ static void dsound_enable_out(HWVoiceOut *hw, bool enable)
|
||||
}
|
||||
}
|
||||
|
||||
static void *dsound_get_buffer_out(HWVoiceOut *hw, size_t *size)
|
||||
static size_t dsound_buffer_get_free(HWVoiceOut *hw)
|
||||
{
|
||||
DSoundVoiceOut *ds = (DSoundVoiceOut *) hw;
|
||||
LPDIRECTSOUNDBUFFER dsb = ds->dsound_buffer;
|
||||
HRESULT hr;
|
||||
DWORD ppos, wpos, act_size;
|
||||
size_t req_size;
|
||||
int err;
|
||||
void *ret;
|
||||
DWORD ppos, wpos;
|
||||
|
||||
hr = IDirectSoundBuffer_GetCurrentPosition(
|
||||
dsb, &ppos, ds->first_time ? &wpos : NULL);
|
||||
if (FAILED(hr)) {
|
||||
dsound_logerr(hr, "Could not get playback buffer position\n");
|
||||
*size = 0;
|
||||
return NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (ds->first_time) {
|
||||
@ -450,13 +446,20 @@ static void *dsound_get_buffer_out(HWVoiceOut *hw, size_t *size)
|
||||
ds->first_time = false;
|
||||
}
|
||||
|
||||
req_size = audio_ring_dist(ppos, hw->pos_emul, hw->size_emul);
|
||||
req_size = MIN(req_size, hw->size_emul - hw->pos_emul);
|
||||
return audio_ring_dist(ppos, hw->pos_emul, hw->size_emul);
|
||||
}
|
||||
|
||||
if (req_size == 0) {
|
||||
*size = 0;
|
||||
return NULL;
|
||||
}
|
||||
static void *dsound_get_buffer_out(HWVoiceOut *hw, size_t *size)
|
||||
{
|
||||
DSoundVoiceOut *ds = (DSoundVoiceOut *)hw;
|
||||
LPDIRECTSOUNDBUFFER dsb = ds->dsound_buffer;
|
||||
DWORD act_size;
|
||||
size_t req_size;
|
||||
int err;
|
||||
void *ret;
|
||||
|
||||
req_size = MIN(*size, hw->size_emul - hw->pos_emul);
|
||||
assert(req_size > 0);
|
||||
|
||||
err = dsound_lock_out(dsb, &hw->info, hw->pos_emul, req_size, &ret, NULL,
|
||||
&act_size, NULL, false, ds->s);
|
||||
@ -620,7 +623,7 @@ static void *dsound_audio_init(Audiodev *dev)
|
||||
{
|
||||
int err;
|
||||
HRESULT hr;
|
||||
dsound *s = g_malloc0(sizeof(dsound));
|
||||
dsound *s = g_new0(dsound, 1);
|
||||
AudiodevDsoundOptions *dso;
|
||||
|
||||
assert(dev->driver == AUDIODEV_DRIVER_DSOUND);
|
||||
@ -699,6 +702,7 @@ static struct audio_pcm_ops dsound_pcm_ops = {
|
||||
.init_out = dsound_init_out,
|
||||
.fini_out = dsound_fini_out,
|
||||
.write = audio_generic_write,
|
||||
.buffer_get_free = dsound_buffer_get_free,
|
||||
.get_buffer_out = dsound_get_buffer_out,
|
||||
.put_buffer_out = dsound_put_buffer_out,
|
||||
.enable_out = dsound_enable_out,
|
||||
|
@ -97,9 +97,9 @@ static void qjack_buffer_create(QJackBuffer *buffer, int channels, int frames)
|
||||
buffer->used = 0;
|
||||
buffer->rptr = 0;
|
||||
buffer->wptr = 0;
|
||||
buffer->data = g_malloc(channels * sizeof(float *));
|
||||
buffer->data = g_new(float *, channels);
|
||||
for (int i = 0; i < channels; ++i) {
|
||||
buffer->data[i] = g_malloc(frames * sizeof(float));
|
||||
buffer->data[i] = g_new(float, frames);
|
||||
}
|
||||
}
|
||||
|
||||
@ -453,7 +453,7 @@ static int qjack_client_init(QJackClient *c)
|
||||
jack_on_shutdown(c->client, qjack_shutdown, c);
|
||||
|
||||
/* allocate and register the ports */
|
||||
c->port = g_malloc(sizeof(jack_port_t *) * c->nchannels);
|
||||
c->port = g_new(jack_port_t *, c->nchannels);
|
||||
for (int i = 0; i < c->nchannels; ++i) {
|
||||
|
||||
char port_name[16];
|
||||
@ -483,8 +483,8 @@ static int qjack_client_init(QJackClient *c)
|
||||
c->buffersize = 512;
|
||||
}
|
||||
|
||||
/* create a 2 period buffer */
|
||||
qjack_buffer_create(&c->fifo, c->nchannels, c->buffersize * 2);
|
||||
/* create a 3 period buffer */
|
||||
qjack_buffer_create(&c->fifo, c->nchannels, c->buffersize * 3);
|
||||
|
||||
qjack_client_connect_ports(c);
|
||||
c->state = QJACK_STATE_RUNNING;
|
||||
@ -652,6 +652,7 @@ static struct audio_pcm_ops jack_pcm_ops = {
|
||||
.init_out = qjack_init_out,
|
||||
.fini_out = qjack_fini_out,
|
||||
.write = qjack_write,
|
||||
.buffer_get_free = audio_generic_buffer_get_free,
|
||||
.run_buffer_out = audio_generic_run_buffer_out,
|
||||
.enable_out = qjack_enable_out,
|
||||
|
||||
|
@ -7,7 +7,7 @@ softmmu_ss.add(files(
|
||||
'wavcapture.c',
|
||||
))
|
||||
|
||||
softmmu_ss.add(when: coreaudio, if_true: files('coreaudio.c'))
|
||||
softmmu_ss.add(when: coreaudio, if_true: files('coreaudio.m'))
|
||||
softmmu_ss.add(when: dsound, if_true: files('dsoundaudio.c', 'audio_win_int.c'))
|
||||
|
||||
audio_modules = {}
|
||||
@ -28,7 +28,7 @@ endforeach
|
||||
|
||||
if dbus_display
|
||||
module_ss = ss.source_set()
|
||||
module_ss.add(when: gio, if_true: files('dbusaudio.c'))
|
||||
module_ss.add(when: [gio, pixman, opengl, 'CONFIG_GIO'], if_true: files('dbusaudio.c'))
|
||||
audio_modules += {'dbus': module_ss}
|
||||
endif
|
||||
|
||||
|
@ -342,13 +342,13 @@ f_sample *mixeng_clip_float[2] = {
|
||||
void audio_sample_to_uint64(const void *samples, int pos,
|
||||
uint64_t *left, uint64_t *right)
|
||||
{
|
||||
const struct st_sample *sample = samples;
|
||||
sample += pos;
|
||||
#ifdef FLOAT_MIXENG
|
||||
error_report(
|
||||
"Coreaudio and floating point samples are not supported by replay yet");
|
||||
abort();
|
||||
#else
|
||||
const struct st_sample *sample = samples;
|
||||
sample += pos;
|
||||
*left = sample->l;
|
||||
*right = sample->r;
|
||||
#endif
|
||||
@ -357,13 +357,13 @@ void audio_sample_to_uint64(const void *samples, int pos,
|
||||
void audio_sample_from_uint64(void *samples, int pos,
|
||||
uint64_t left, uint64_t right)
|
||||
{
|
||||
struct st_sample *sample = samples;
|
||||
sample += pos;
|
||||
#ifdef FLOAT_MIXENG
|
||||
error_report(
|
||||
"Coreaudio and floating point samples are not supported by replay yet");
|
||||
abort();
|
||||
#else
|
||||
struct st_sample *sample = samples;
|
||||
sample += pos;
|
||||
sample->l = left;
|
||||
sample->r = right;
|
||||
#endif
|
||||
|
@ -118,6 +118,7 @@ static struct audio_pcm_ops no_pcm_ops = {
|
||||
.init_out = no_init_out,
|
||||
.fini_out = no_fini_out,
|
||||
.write = no_write,
|
||||
.buffer_get_free = audio_generic_buffer_get_free,
|
||||
.run_buffer_out = audio_generic_run_buffer_out,
|
||||
.enable_out = no_enable_out,
|
||||
|
||||
|
@ -63,7 +63,7 @@ struct oss_params {
|
||||
int fragsize;
|
||||
};
|
||||
|
||||
static void GCC_FMT_ATTR (2, 3) oss_logerr (int err, const char *fmt, ...)
|
||||
static void G_GNUC_PRINTF (2, 3) oss_logerr (int err, const char *fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
|
||||
@ -74,7 +74,7 @@ static void GCC_FMT_ATTR (2, 3) oss_logerr (int err, const char *fmt, ...)
|
||||
AUD_log (AUDIO_CAP, "Reason: %s\n", strerror (err));
|
||||
}
|
||||
|
||||
static void GCC_FMT_ATTR (3, 4) oss_logerr2 (
|
||||
static void G_GNUC_PRINTF (3, 4) oss_logerr2 (
|
||||
int err,
|
||||
const char *typ,
|
||||
const char *fmt,
|
||||
@ -389,11 +389,23 @@ static void oss_run_buffer_out(HWVoiceOut *hw)
|
||||
}
|
||||
}
|
||||
|
||||
static size_t oss_buffer_get_free(HWVoiceOut *hw)
|
||||
{
|
||||
OSSVoiceOut *oss = (OSSVoiceOut *)hw;
|
||||
|
||||
if (oss->mmapped) {
|
||||
return oss_get_available_bytes(oss);
|
||||
} else {
|
||||
return audio_generic_buffer_get_free(hw);
|
||||
}
|
||||
}
|
||||
|
||||
static void *oss_get_buffer_out(HWVoiceOut *hw, size_t *size)
|
||||
{
|
||||
OSSVoiceOut *oss = (OSSVoiceOut *) hw;
|
||||
OSSVoiceOut *oss = (OSSVoiceOut *)hw;
|
||||
|
||||
if (oss->mmapped) {
|
||||
*size = MIN(oss_get_available_bytes(oss), hw->size_emul - hw->pos_emul);
|
||||
*size = hw->size_emul - hw->pos_emul;
|
||||
return hw->buf_emul + hw->pos_emul;
|
||||
} else {
|
||||
return audio_generic_get_buffer_out(hw, size);
|
||||
@ -750,6 +762,7 @@ static struct audio_pcm_ops oss_pcm_ops = {
|
||||
.init_out = oss_init_out,
|
||||
.fini_out = oss_fini_out,
|
||||
.write = oss_write,
|
||||
.buffer_get_free = oss_buffer_get_free,
|
||||
.run_buffer_out = oss_run_buffer_out,
|
||||
.get_buffer_out = oss_get_buffer_out,
|
||||
.put_buffer_out = oss_put_buffer_out,
|
||||
|
@ -43,7 +43,7 @@ typedef struct {
|
||||
|
||||
static void qpa_conn_fini(PAConnection *c);
|
||||
|
||||
static void GCC_FMT_ATTR (2, 3) qpa_logerr (int err, const char *fmt, ...)
|
||||
static void G_GNUC_PRINTF (2, 3) qpa_logerr (int err, const char *fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
|
||||
@ -201,13 +201,11 @@ unlock_and_fail:
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void *qpa_get_buffer_out(HWVoiceOut *hw, size_t *size)
|
||||
static size_t qpa_buffer_get_free(HWVoiceOut *hw)
|
||||
{
|
||||
PAVoiceOut *p = (PAVoiceOut *) hw;
|
||||
PAVoiceOut *p = (PAVoiceOut *)hw;
|
||||
PAConnection *c = p->g->conn;
|
||||
void *ret;
|
||||
size_t l;
|
||||
int r;
|
||||
|
||||
pa_threaded_mainloop_lock(c->mainloop);
|
||||
|
||||
@ -216,7 +214,6 @@ static void *qpa_get_buffer_out(HWVoiceOut *hw, size_t *size)
|
||||
if (pa_stream_get_state(p->stream) != PA_STREAM_READY) {
|
||||
/* wait for stream to become ready */
|
||||
l = 0;
|
||||
ret = NULL;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
@ -224,16 +221,33 @@ static void *qpa_get_buffer_out(HWVoiceOut *hw, size_t *size)
|
||||
CHECK_SUCCESS_GOTO(c, l != (size_t) -1, unlock_and_fail,
|
||||
"pa_stream_writable_size failed\n");
|
||||
|
||||
unlock:
|
||||
pa_threaded_mainloop_unlock(c->mainloop);
|
||||
return l;
|
||||
|
||||
unlock_and_fail:
|
||||
pa_threaded_mainloop_unlock(c->mainloop);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void *qpa_get_buffer_out(HWVoiceOut *hw, size_t *size)
|
||||
{
|
||||
PAVoiceOut *p = (PAVoiceOut *)hw;
|
||||
PAConnection *c = p->g->conn;
|
||||
void *ret;
|
||||
int r;
|
||||
|
||||
pa_threaded_mainloop_lock(c->mainloop);
|
||||
|
||||
CHECK_DEAD_GOTO(c, p->stream, unlock_and_fail,
|
||||
"pa_threaded_mainloop_lock failed\n");
|
||||
|
||||
*size = -1;
|
||||
r = pa_stream_begin_write(p->stream, &ret, size);
|
||||
CHECK_SUCCESS_GOTO(c, r >= 0, unlock_and_fail,
|
||||
"pa_stream_begin_write failed\n");
|
||||
|
||||
unlock:
|
||||
pa_threaded_mainloop_unlock(c->mainloop);
|
||||
if (*size > l) {
|
||||
*size = l;
|
||||
}
|
||||
return ret;
|
||||
|
||||
unlock_and_fail:
|
||||
@ -535,11 +549,8 @@ static int qpa_init_out(HWVoiceOut *hw, struct audsettings *as,
|
||||
}
|
||||
|
||||
audio_pcm_init_info (&hw->info, &obt_as);
|
||||
/*
|
||||
* This is wrong. hw->samples counts in frames. hw->samples will be
|
||||
* number of channels times larger than expected.
|
||||
*/
|
||||
hw->samples = audio_buffer_samples(
|
||||
/* hw->samples counts in frames */
|
||||
hw->samples = audio_buffer_frames(
|
||||
qapi_AudiodevPaPerDirectionOptions_base(ppdo), &obt_as, 46440);
|
||||
|
||||
return 0;
|
||||
@ -587,11 +598,8 @@ static int qpa_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
|
||||
}
|
||||
|
||||
audio_pcm_init_info (&hw->info, &obt_as);
|
||||
/*
|
||||
* This is wrong. hw->samples counts in frames. hw->samples will be
|
||||
* number of channels times larger than expected.
|
||||
*/
|
||||
hw->samples = audio_buffer_samples(
|
||||
/* hw->samples counts in frames */
|
||||
hw->samples = audio_buffer_frames(
|
||||
qapi_AudiodevPaPerDirectionOptions_base(ppdo), &obt_as, 46440);
|
||||
|
||||
return 0;
|
||||
@ -744,7 +752,7 @@ static int qpa_validate_per_direction_opts(Audiodev *dev,
|
||||
{
|
||||
if (!pdo->has_latency) {
|
||||
pdo->has_latency = true;
|
||||
pdo->latency = 15000;
|
||||
pdo->latency = 46440;
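For reference, 46440 microseconds is the length of a 2048-frame buffer at
44100 Hz:

    2048 frames / 44100 Hz = 0.04644 s ~= 46440 us

which also matches the 46440 passed to audio_buffer_frames() in the
qpa_init_out()/qpa_init_in() hunks above; reading the new default as "2048
frames at CD rate" is an inference, not something stated in this diff.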
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
@ -752,7 +760,7 @@ static int qpa_validate_per_direction_opts(Audiodev *dev,
|
||||
/* common */
|
||||
static void *qpa_conn_init(const char *server)
|
||||
{
|
||||
PAConnection *c = g_malloc0(sizeof(PAConnection));
|
||||
PAConnection *c = g_new0(PAConnection, 1);
|
||||
QTAILQ_INSERT_TAIL(&pa_conns, c, list);
|
||||
|
||||
c->mainloop = pa_threaded_mainloop_new();
|
||||
@ -841,7 +849,7 @@ static void *qpa_audio_init(Audiodev *dev)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
g = g_malloc0(sizeof(paaudio));
|
||||
g = g_new0(paaudio, 1);
|
||||
server = popts->has_server ? popts->server : NULL;
|
||||
|
||||
g->dev = dev;
|
||||
@ -901,6 +909,7 @@ static struct audio_pcm_ops qpa_pcm_ops = {
|
||||
.init_out = qpa_init_out,
|
||||
.fini_out = qpa_fini_out,
|
||||
.write = qpa_write,
|
||||
.buffer_get_free = qpa_buffer_get_free,
|
||||
.get_buffer_out = qpa_get_buffer_out,
|
||||
.put_buffer_out = qpa_put_buffer_out,
|
||||
.volume_out = qpa_volume_out,
|
||||
|
@ -55,7 +55,7 @@ typedef struct SDLVoiceIn {
|
||||
SDL_AudioDeviceID devid;
|
||||
} SDLVoiceIn;
|
||||
|
||||
static void GCC_FMT_ATTR (1, 2) sdl_logerr (const char *fmt, ...)
|
||||
static void G_GNUC_PRINTF (1, 2) sdl_logerr (const char *fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
|
||||
@ -224,12 +224,11 @@ static void sdl_callback_out(void *opaque, Uint8 *buf, int len)
|
||||
/* dolog("callback_out: len=%d avail=%zu\n", len, hw->pending_emul); */
|
||||
|
||||
while (hw->pending_emul && len) {
|
||||
size_t write_len;
|
||||
ssize_t start = (ssize_t)hw->pos_emul - hw->pending_emul;
|
||||
if (start < 0) {
|
||||
start += hw->size_emul;
|
||||
}
|
||||
assert(start >= 0 && start < hw->size_emul);
|
||||
size_t write_len, start;
|
||||
|
||||
start = audio_ring_posb(hw->pos_emul, hw->pending_emul,
|
||||
hw->size_emul);
|
||||
assert(start < hw->size_emul);
|
||||
|
||||
write_len = MIN(MIN(hw->pending_emul, len),
|
||||
hw->size_emul - start);
|
||||
@ -310,6 +309,7 @@ static void sdl_callback_in(void *opaque, Uint8 *buf, int len)
|
||||
SDL_UnlockAudioDevice(sdl->devid); \
|
||||
}
|
||||
|
||||
SDL_WRAPPER_FUNC(buffer_get_free, size_t, (HWVoiceOut *hw), (hw), Out)
|
||||
SDL_WRAPPER_FUNC(get_buffer_out, void *, (HWVoiceOut *hw, size_t *size),
|
||||
(hw, size), Out)
|
||||
SDL_WRAPPER_FUNC(put_buffer_out, size_t,
|
||||
@ -347,11 +347,8 @@ static int sdl_init_out(HWVoiceOut *hw, struct audsettings *as,
|
||||
req.freq = as->freq;
|
||||
req.format = aud_to_sdlfmt (as->fmt);
|
||||
req.channels = as->nchannels;
|
||||
/*
|
||||
* This is wrong. SDL samples are QEMU frames. The buffer size will be
|
||||
* the requested buffer size multiplied by the number of channels.
|
||||
*/
|
||||
req.samples = audio_buffer_samples(
|
||||
/* SDL samples are QEMU frames */
|
||||
req.samples = audio_buffer_frames(
|
||||
qapi_AudiodevSdlPerDirectionOptions_base(spdo), as, 11610);
|
||||
req.callback = sdl_callback_out;
|
||||
req.userdata = sdl;
|
||||
@ -472,6 +469,8 @@ static struct audio_pcm_ops sdl_pcm_ops = {
|
||||
.fini_out = sdl_fini_out,
|
||||
/* wrapper for audio_generic_write */
|
||||
.write = sdl_write,
|
||||
/* wrapper for audio_generic_buffer_get_free */
|
||||
.buffer_get_free = sdl_buffer_get_free,
|
||||
/* wrapper for audio_generic_get_buffer_out */
|
||||
.get_buffer_out = sdl_get_buffer_out,
|
||||
/* wrapper for audio_generic_put_buffer_out */
|
||||
|
@ -197,6 +197,7 @@ static struct audio_pcm_ops wav_pcm_ops = {
|
||||
.init_out = wav_init_out,
|
||||
.fini_out = wav_fini_out,
|
||||
.write = wav_write_out,
|
||||
.buffer_get_free = audio_generic_buffer_get_free,
|
||||
.run_buffer_out = audio_generic_run_buffer_out,
|
||||
.enable_out = wav_enable_out,
|
||||
};
|
||||
|
@ -39,7 +39,7 @@ cryptodev_backend_new_client(const char *model,
|
||||
{
|
||||
CryptoDevBackendClient *cc;
|
||||
|
||||
cc = g_malloc0(sizeof(CryptoDevBackendClient));
|
||||
cc = g_new0(CryptoDevBackendClient, 1);
|
||||
cc->model = g_strdup(model);
|
||||
if (name) {
|
||||
cc->name = g_strdup(name);
|
||||
|
@ -12,7 +12,6 @@
|
||||
#include <sys/ioctl.h>
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "qom/object_interfaces.h"
|
||||
#include "qapi/error.h"
|
||||
#include "sysemu/hostmem.h"
|
||||
|
@ -319,7 +319,7 @@ size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
|
||||
#else
|
||||
size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
|
||||
{
|
||||
return qemu_real_host_page_size;
|
||||
return qemu_real_host_page_size();
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -15,7 +15,7 @@ softmmu_ss.add(when: 'CONFIG_LINUX', if_true: files('hostmem-memfd.c'))
|
||||
softmmu_ss.add(when: ['CONFIG_VHOST_USER', 'CONFIG_VIRTIO'], if_true: files('vhost-user.c'))
|
||||
softmmu_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost.c'))
|
||||
softmmu_ss.add(when: ['CONFIG_VIRTIO_CRYPTO', 'CONFIG_VHOST_CRYPTO'], if_true: files('cryptodev-vhost-user.c'))
|
||||
softmmu_ss.add(when: 'CONFIG_GIO', if_true: [files('dbus-vmstate.c'), gio])
|
||||
softmmu_ss.add(when: gio, if_true: files('dbus-vmstate.c'))
|
||||
softmmu_ss.add(when: 'CONFIG_SGX', if_true: files('hostmem-epc.c'))
|
||||
|
||||
subdir('tpm')
|
||||
|
@ -23,7 +23,6 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/module.h"
|
||||
#include "qemu/sockets.h"
|
||||
|
@ -53,10 +53,31 @@ static int coroutine_fn blockdev_amend_run(Job *job, Error **errp)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int blockdev_amend_pre_run(BlockdevAmendJob *s, Error **errp)
|
||||
{
|
||||
if (s->bs->drv->bdrv_amend_pre_run) {
|
||||
return s->bs->drv->bdrv_amend_pre_run(s->bs, errp);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void blockdev_amend_free(Job *job)
|
||||
{
|
||||
BlockdevAmendJob *s = container_of(job, BlockdevAmendJob, common);
|
||||
|
||||
if (s->bs->drv->bdrv_amend_clean) {
|
||||
s->bs->drv->bdrv_amend_clean(s->bs);
|
||||
}
|
||||
|
||||
bdrv_unref(s->bs);
|
||||
}
|
||||
|
||||
static const JobDriver blockdev_amend_job_driver = {
|
||||
.instance_size = sizeof(BlockdevAmendJob),
|
||||
.job_type = JOB_TYPE_AMEND,
|
||||
.run = blockdev_amend_run,
|
||||
.free = blockdev_amend_free,
|
||||
};
|
||||
|
||||
void qmp_x_blockdev_amend(const char *job_id,
|
||||
@ -110,8 +131,15 @@ void qmp_x_blockdev_amend(const char *job_id,
|
||||
return;
|
||||
}
|
||||
|
||||
bdrv_ref(bs);
|
||||
s->bs = bs,
|
||||
s->opts = QAPI_CLONE(BlockdevAmendOptions, options),
|
||||
s->force = has_force ? force : false;
|
||||
|
||||
if (blockdev_amend_pre_run(s, errp)) {
|
||||
job_early_fail(&s->common);
|
||||
return;
|
||||
}
|
||||
|
||||
job_start(&s->common);
|
||||
}
|
||||
|
@ -372,6 +372,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
|
||||
|
||||
assert(bs);
|
||||
assert(target);
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
/* QMP interface protects us from these cases */
|
||||
assert(sync_mode != MIRROR_SYNC_MODE_INCREMENTAL);
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include "qemu/cutils.h"
|
||||
#include "qemu/module.h"
|
||||
#include "qemu/option.h"
|
||||
#include "qemu/memalign.h"
|
||||
|
||||
typedef struct {
|
||||
BdrvChild *test_file;
|
||||
@ -43,7 +44,7 @@ typedef struct BlkverifyRequest {
|
||||
QEMUIOVector *raw_qiov; /* cloned I/O vector for raw file */
|
||||
} BlkverifyRequest;
|
||||
|
||||
static void GCC_FMT_ATTR(2, 3) blkverify_err(BlkverifyRequest *r,
|
||||
static void G_GNUC_PRINTF(2, 3) blkverify_err(BlkverifyRequest *r,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
|
File diff suppressed because it is too large
@ -17,11 +17,13 @@
|
||||
#include "trace.h"
|
||||
#include "qapi/error.h"
|
||||
#include "block/block-copy.h"
|
||||
#include "block/reqlist.h"
|
||||
#include "sysemu/block-backend.h"
|
||||
#include "qemu/units.h"
|
||||
#include "qemu/coroutine.h"
|
||||
#include "block/aio_task.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/memalign.h"
|
||||
|
||||
#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
|
||||
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
|
||||
@ -83,7 +85,6 @@ typedef struct BlockCopyTask {
|
||||
*/
|
||||
BlockCopyState *s;
|
||||
BlockCopyCallState *call_state;
|
||||
int64_t offset;
|
||||
/*
|
||||
* @method can also be set again in the while loop of
|
||||
* block_copy_dirty_clusters(), but it is never accessed concurrently
|
||||
@ -94,21 +95,17 @@ typedef struct BlockCopyTask {
|
||||
BlockCopyMethod method;
|
||||
|
||||
/*
|
||||
* Fields whose state changes throughout the execution
|
||||
* Protected by lock in BlockCopyState.
|
||||
* Generally, req is protected by lock in BlockCopyState, Still req.offset
|
||||
* is only set on task creation, so may be read concurrently after creation.
|
||||
* req.bytes is changed at most once, and need only protecting the case of
|
||||
* parallel read while updating @bytes value in block_copy_task_shrink().
|
||||
*/
|
||||
CoQueue wait_queue; /* coroutines blocked on this task */
|
||||
/*
|
||||
* Only protect the case of parallel read while updating @bytes
|
||||
* value in block_copy_task_shrink().
|
||||
*/
|
||||
int64_t bytes;
|
||||
QLIST_ENTRY(BlockCopyTask) list;
|
||||
BlockReq req;
|
||||
} BlockCopyTask;
|
||||
|
||||
static int64_t task_end(BlockCopyTask *task)
|
||||
{
|
||||
return task->offset + task->bytes;
|
||||
return task->req.offset + task->req.bytes;
|
||||
}
|
||||
|
||||
typedef struct BlockCopyState {
|
||||
@ -136,7 +133,7 @@ typedef struct BlockCopyState {
|
||||
CoMutex lock;
|
||||
int64_t in_flight_bytes;
|
||||
BlockCopyMethod method;
|
||||
QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
|
||||
BlockReqList reqs;
|
||||
QLIST_HEAD(, BlockCopyCallState) calls;
|
||||
/*
|
||||
* skip_unallocated:
|
||||
@ -160,42 +157,6 @@ typedef struct BlockCopyState {
|
||||
RateLimit rate_limit;
|
||||
} BlockCopyState;
|
||||
|
||||
/* Called with lock held */
static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
                                            int64_t offset, int64_t bytes)
{
    BlockCopyTask *t;

    QLIST_FOREACH(t, &s->tasks, list) {
        if (offset + bytes > t->offset && offset < t->offset + t->bytes) {
            return t;
        }
    }

    return NULL;
}

/*
 * If there are no intersecting tasks return false. Otherwise, wait for the
 * first found intersecting tasks to finish and return true.
 *
 * Called with lock held. May temporary release the lock.
 * Return value of 0 proves that lock was NOT released.
 */
static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
                                             int64_t bytes)
{
    BlockCopyTask *task = find_conflicting_task(s, offset, bytes);

    if (!task) {
        return false;
    }

    qemu_co_queue_wait(&task->wait_queue, &s->lock);

    return true;
}

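These two helpers disappear in favour of the shared request-list API from
block/reqlist.h; later hunks call reqlist_find_conflict(&s->reqs, offset,
bytes) and reqlist_wait_one(&s->reqs, offset, bytes, &s->lock) instead. A
minimal sketch of what the generic versions presumably look like, mirroring
the removed code above (the BlockReq/BlockReqList field names are an
assumption, since their definitions are not part of this diff):

    BlockReq *reqlist_find_conflict(BlockReqList *reqs, int64_t offset,
                                    int64_t bytes)
    {
        BlockReq *r;

        QLIST_FOREACH(r, reqs, list) {
            if (offset + bytes > r->offset && offset < r->offset + r->bytes) {
                return r;
            }
        }

        return NULL;
    }

    /* Called with lock held. May temporarily release the lock. */
    bool coroutine_fn reqlist_wait_one(BlockReqList *reqs, int64_t offset,
                                       int64_t bytes, CoMutex *lock)
    {
        BlockReq *r = reqlist_find_conflict(reqs, offset, bytes);

        if (!r) {
            return false;
        }

        qemu_co_queue_wait(&r->wait_queue, lock);
        return true;
    }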
/* Called with lock held */
|
||||
static int64_t block_copy_chunk_size(BlockCopyState *s)
|
||||
{
|
||||
@ -239,7 +200,7 @@ block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state,
|
||||
bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);
|
||||
|
||||
/* region is dirty, so no existent tasks possible in it */
|
||||
assert(!find_conflicting_task(s, offset, bytes));
|
||||
assert(!reqlist_find_conflict(&s->reqs, offset, bytes));
|
||||
|
||||
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
|
||||
s->in_flight_bytes += bytes;
|
||||
@ -249,12 +210,9 @@ block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state,
|
||||
.task.func = block_copy_task_entry,
|
||||
.s = s,
|
||||
.call_state = call_state,
|
||||
.offset = offset,
|
||||
.bytes = bytes,
|
||||
.method = s->method,
|
||||
};
|
||||
qemu_co_queue_init(&task->wait_queue);
|
||||
QLIST_INSERT_HEAD(&s->tasks, task, list);
|
||||
reqlist_init_req(&s->reqs, &task->req, offset, bytes);
|
||||
|
||||
return task;
|
||||
}
|
||||
@ -270,34 +228,34 @@ static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
|
||||
int64_t new_bytes)
|
||||
{
|
||||
QEMU_LOCK_GUARD(&task->s->lock);
|
||||
if (new_bytes == task->bytes) {
|
||||
if (new_bytes == task->req.bytes) {
|
||||
return;
|
||||
}
|
||||
|
||||
assert(new_bytes > 0 && new_bytes < task->bytes);
|
||||
assert(new_bytes > 0 && new_bytes < task->req.bytes);
|
||||
|
||||
task->s->in_flight_bytes -= task->bytes - new_bytes;
|
||||
task->s->in_flight_bytes -= task->req.bytes - new_bytes;
|
||||
bdrv_set_dirty_bitmap(task->s->copy_bitmap,
|
||||
task->offset + new_bytes, task->bytes - new_bytes);
|
||||
task->req.offset + new_bytes,
|
||||
task->req.bytes - new_bytes);
|
||||
|
||||
task->bytes = new_bytes;
|
||||
qemu_co_queue_restart_all(&task->wait_queue);
|
||||
reqlist_shrink_req(&task->req, new_bytes);
|
||||
}
|
||||
|
||||
static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
|
||||
{
|
||||
QEMU_LOCK_GUARD(&task->s->lock);
|
||||
task->s->in_flight_bytes -= task->bytes;
|
||||
task->s->in_flight_bytes -= task->req.bytes;
|
||||
if (ret < 0) {
|
||||
bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes);
|
||||
bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->req.offset,
|
||||
task->req.bytes);
|
||||
}
|
||||
QLIST_REMOVE(task, list);
|
||||
if (task->s->progress) {
|
||||
progress_set_remaining(task->s->progress,
|
||||
bdrv_get_dirty_count(task->s->copy_bitmap) +
|
||||
task->s->in_flight_bytes);
|
||||
}
|
||||
qemu_co_queue_restart_all(&task->wait_queue);
|
||||
reqlist_remove_req(&task->req);
|
||||
}
|
||||
|
||||
void block_copy_state_free(BlockCopyState *s)
|
||||
@ -384,8 +342,10 @@ static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
|
||||
}
|
||||
|
||||
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
|
||||
const BdrvDirtyBitmap *bitmap,
|
||||
Error **errp)
|
||||
{
|
||||
ERRP_GUARD();
|
||||
BlockCopyState *s;
|
||||
int64_t cluster_size;
|
||||
BdrvDirtyBitmap *copy_bitmap;
|
||||
@ -402,6 +362,17 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
|
||||
return NULL;
|
||||
}
|
||||
bdrv_disable_dirty_bitmap(copy_bitmap);
|
||||
if (bitmap) {
|
||||
if (!bdrv_merge_dirty_bitmap(copy_bitmap, bitmap, NULL, errp)) {
|
||||
error_prepend(errp, "Failed to merge bitmap '%s' to internal "
|
||||
"copy-bitmap: ", bdrv_dirty_bitmap_name(bitmap));
|
||||
bdrv_release_dirty_bitmap(copy_bitmap);
|
||||
return NULL;
|
||||
}
|
||||
} else {
|
||||
bdrv_set_dirty_bitmap(copy_bitmap, 0,
|
||||
bdrv_dirty_bitmap_size(copy_bitmap));
|
||||
}
|
||||
|
||||
/*
|
||||
* If source is in backing chain of target assume that target is going to be
|
||||
@ -437,7 +408,7 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
|
||||
|
||||
ratelimit_init(&s->rate_limit);
|
||||
qemu_co_mutex_init(&s->lock);
|
||||
QLIST_INIT(&s->tasks);
|
||||
QLIST_INIT(&s->reqs);
|
||||
QLIST_INIT(&s->calls);
|
||||
|
||||
return s;
|
||||
@ -470,7 +441,7 @@ static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
|
||||
|
||||
aio_task_pool_wait_slot(pool);
|
||||
if (aio_task_pool_status(pool) < 0) {
|
||||
co_put_to_shres(task->s->mem, task->bytes);
|
||||
co_put_to_shres(task->s->mem, task->req.bytes);
|
||||
block_copy_task_end(task, -ECANCELED);
|
||||
g_free(task);
|
||||
return -ECANCELED;
|
||||
@ -583,7 +554,8 @@ static coroutine_fn int block_copy_task_entry(AioTask *task)
|
||||
BlockCopyMethod method = t->method;
|
||||
int ret;
|
||||
|
||||
ret = block_copy_do_copy(s, t->offset, t->bytes, &method, &error_is_read);
|
||||
ret = block_copy_do_copy(s, t->req.offset, t->req.bytes, &method,
|
||||
&error_is_read);
|
||||
|
||||
WITH_QEMU_LOCK_GUARD(&s->lock) {
|
||||
if (s->method == t->method) {
|
||||
@ -596,10 +568,10 @@ static coroutine_fn int block_copy_task_entry(AioTask *task)
|
||||
t->call_state->error_is_read = error_is_read;
|
||||
}
|
||||
} else if (s->progress) {
|
||||
progress_work_done(s->progress, t->bytes);
|
||||
progress_work_done(s->progress, t->req.bytes);
|
||||
}
|
||||
}
|
||||
co_put_to_shres(s->mem, t->bytes);
|
||||
co_put_to_shres(s->mem, t->req.bytes);
|
||||
block_copy_task_end(t, ret);
|
||||
|
||||
return ret;
|
||||
@ -679,6 +651,18 @@ static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
|
||||
}
|
||||
}
|
||||
|
||||
void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes)
|
||||
{
|
||||
QEMU_LOCK_GUARD(&s->lock);
|
||||
|
||||
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
|
||||
if (s->progress) {
|
||||
progress_set_remaining(s->progress,
|
||||
bdrv_get_dirty_count(s->copy_bitmap) +
|
||||
s->in_flight_bytes);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Reset bits in copy_bitmap starting at offset if they represent unallocated
|
||||
* data in the image. May reset subsequent contiguous bits.
|
||||
@ -699,14 +683,7 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
|
||||
bytes = clusters * s->cluster_size;
|
||||
|
||||
if (!ret) {
|
||||
qemu_co_mutex_lock(&s->lock);
|
||||
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
|
||||
if (s->progress) {
|
||||
progress_set_remaining(s->progress,
|
||||
bdrv_get_dirty_count(s->copy_bitmap) +
|
||||
s->in_flight_bytes);
|
||||
}
|
||||
qemu_co_mutex_unlock(&s->lock);
|
||||
block_copy_reset(s, offset, bytes);
|
||||
}
|
||||
|
||||
*count = bytes;
|
||||
@ -753,22 +730,22 @@ block_copy_dirty_clusters(BlockCopyCallState *call_state)
|
||||
trace_block_copy_skip_range(s, offset, bytes);
|
||||
break;
|
||||
}
|
||||
if (task->offset > offset) {
|
||||
trace_block_copy_skip_range(s, offset, task->offset - offset);
|
||||
if (task->req.offset > offset) {
|
||||
trace_block_copy_skip_range(s, offset, task->req.offset - offset);
|
||||
}
|
||||
|
||||
found_dirty = true;
|
||||
|
||||
ret = block_copy_block_status(s, task->offset, task->bytes,
|
||||
ret = block_copy_block_status(s, task->req.offset, task->req.bytes,
|
||||
&status_bytes);
|
||||
assert(ret >= 0); /* never fail */
|
||||
if (status_bytes < task->bytes) {
|
||||
if (status_bytes < task->req.bytes) {
|
||||
block_copy_task_shrink(task, status_bytes);
|
||||
}
|
||||
if (qatomic_read(&s->skip_unallocated) &&
|
||||
!(ret & BDRV_BLOCK_ALLOCATED)) {
|
||||
block_copy_task_end(task, 0);
|
||||
trace_block_copy_skip_range(s, task->offset, task->bytes);
|
||||
trace_block_copy_skip_range(s, task->req.offset, task->req.bytes);
|
||||
offset = task_end(task);
|
||||
bytes = end - offset;
|
||||
g_free(task);
|
||||
@ -789,11 +766,11 @@ block_copy_dirty_clusters(BlockCopyCallState *call_state)
|
||||
}
|
||||
}
|
||||
|
||||
ratelimit_calculate_delay(&s->rate_limit, task->bytes);
|
||||
ratelimit_calculate_delay(&s->rate_limit, task->req.bytes);
|
||||
|
||||
trace_block_copy_process(s, task->offset);
|
||||
trace_block_copy_process(s, task->req.offset);
|
||||
|
||||
co_get_from_shres(s->mem, task->bytes);
|
||||
co_get_from_shres(s->mem, task->req.bytes);
|
||||
|
||||
offset = task_end(task);
|
||||
bytes = end - offset;
|
||||
@ -861,8 +838,8 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
|
||||
* Check that there is no task we still need to
|
||||
* wait to complete
|
||||
*/
|
||||
ret = block_copy_wait_one(s, call_state->offset,
|
||||
call_state->bytes);
|
||||
ret = reqlist_wait_one(&s->reqs, call_state->offset,
|
||||
call_state->bytes, &s->lock);
|
||||
if (ret == 0) {
|
||||
/*
|
||||
* No pending tasks, but check again the bitmap in this
|
||||
@ -870,7 +847,7 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
|
||||
* between this and the critical section in
|
||||
* block_copy_dirty_clusters().
|
||||
*
|
||||
* block_copy_wait_one return value 0 also means that it
|
||||
* reqlist_wait_one return value 0 also means that it
|
||||
* didn't release the lock. So, we are still in the same
|
||||
* critical section, not interrupted by any concurrent
|
||||
* access to state.
|
||||
|
@ -20,6 +20,7 @@
|
||||
#include "qapi/error.h"
|
||||
#include "qapi/qmp/qerror.h"
|
||||
#include "qemu/ratelimit.h"
|
||||
#include "qemu/memalign.h"
|
||||
#include "sysemu/block-backend.h"
|
||||
|
||||
enum {
|
||||
@ -253,6 +254,8 @@ void commit_start(const char *job_id, BlockDriverState *bs,
|
||||
uint64_t base_perms, iter_shared_perms;
|
||||
int ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
assert(top != bs);
|
||||
if (bdrv_skip_filters(top) == bdrv_skip_filters(base)) {
|
||||
error_setg(errp, "Invalid files for merge: top and base are the same");
|
||||
@ -432,6 +435,8 @@ int bdrv_commit(BlockDriverState *bs)
|
||||
QEMU_AUTO_VFREE uint8_t *buf = NULL;
|
||||
Error *local_err = NULL;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
if (!drv)
|
||||
return -ENOMEDIUM;
|
||||
|
||||
|
@ -33,10 +33,37 @@
|
||||
#include "block/block-copy.h"
|
||||
|
||||
#include "block/copy-before-write.h"
|
||||
#include "block/reqlist.h"
|
||||
|
||||
#include "qapi/qapi-visit-block-core.h"
|
||||
|
||||
typedef struct BDRVCopyBeforeWriteState {
|
||||
BlockCopyState *bcs;
|
||||
BdrvChild *target;
|
||||
|
||||
/*
|
||||
* @lock: protects access to @access_bitmap, @done_bitmap and
|
||||
* @frozen_read_reqs
|
||||
*/
|
||||
CoMutex lock;
|
||||
|
||||
/*
|
||||
* @access_bitmap: represents areas allowed for reading by fleecing user.
|
||||
* Reading from non-dirty areas leads to -EACCES.
|
||||
*/
|
||||
BdrvDirtyBitmap *access_bitmap;
|
||||
|
||||
/*
|
||||
* @done_bitmap: represents areas that was successfully copied to @target by
|
||||
* copy-before-write operations.
|
||||
*/
|
||||
BdrvDirtyBitmap *done_bitmap;
|
||||
|
||||
/*
|
||||
* @frozen_read_reqs: current read requests for fleecing user in bs->file
|
||||
* node. These areas must not be rewritten by guest.
|
||||
*/
|
||||
BlockReqList frozen_read_reqs;
|
||||
} BDRVCopyBeforeWriteState;
|
||||
|
||||
static coroutine_fn int cbw_co_preadv(
|
||||
@ -46,10 +73,20 @@ static coroutine_fn int cbw_co_preadv(
|
||||
return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Do copy-before-write operation.
|
||||
*
|
||||
* On failure guest request must be failed too.
|
||||
*
|
||||
* On success, we also wait for all in-flight fleecing read requests in source
|
||||
* node, and it's guaranteed that after cbw_do_copy_before_write() successful
|
||||
* return there are no such requests and they will never appear.
|
||||
*/
|
||||
static coroutine_fn int cbw_do_copy_before_write(BlockDriverState *bs,
|
||||
uint64_t offset, uint64_t bytes, BdrvRequestFlags flags)
|
||||
{
|
||||
BDRVCopyBeforeWriteState *s = bs->opaque;
|
||||
int ret;
|
||||
uint64_t off, end;
|
||||
int64_t cluster_size = block_copy_cluster_size(s->bcs);
|
||||
|
||||
@ -60,7 +97,17 @@ static coroutine_fn int cbw_do_copy_before_write(BlockDriverState *bs,
|
||||
off = QEMU_ALIGN_DOWN(offset, cluster_size);
|
||||
end = QEMU_ALIGN_UP(offset + bytes, cluster_size);
|
||||
|
||||
return block_copy(s->bcs, off, end - off, true);
|
||||
ret = block_copy(s->bcs, off, end - off, true);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
WITH_QEMU_LOCK_GUARD(&s->lock) {
|
||||
bdrv_set_dirty_bitmap(s->done_bitmap, off, end - off);
|
||||
reqlist_wait_all(&s->frozen_read_reqs, off, end - off, &s->lock);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}

static int coroutine_fn cbw_co_pdiscard(BlockDriverState *bs,
@ -108,6 +155,142 @@ static int coroutine_fn cbw_co_flush(BlockDriverState *bs)
return bdrv_co_flush(bs->file->bs);
}

/*
* If @offset not accessible - return NULL.
*
* Otherwise, set @pnum to some bytes that accessible from @file (@file is set
* to bs->file or to s->target). Return newly allocated BlockReq object that
* should be than passed to cbw_snapshot_read_unlock().
*
* It's guaranteed that guest writes will not interact in the region until
* cbw_snapshot_read_unlock() called.
*/
static BlockReq *cbw_snapshot_read_lock(BlockDriverState *bs,
int64_t offset, int64_t bytes,
int64_t *pnum, BdrvChild **file)
{
BDRVCopyBeforeWriteState *s = bs->opaque;
BlockReq *req = g_new(BlockReq, 1);
bool done;

QEMU_LOCK_GUARD(&s->lock);

if (bdrv_dirty_bitmap_next_zero(s->access_bitmap, offset, bytes) != -1) {
g_free(req);
return NULL;
}

done = bdrv_dirty_bitmap_status(s->done_bitmap, offset, bytes, pnum);
if (done) {
/*
* Special invalid BlockReq, that is handled in
* cbw_snapshot_read_unlock(). We don't need to lock something to read
* from s->target.
*/
*req = (BlockReq) {.offset = -1, .bytes = -1};
*file = s->target;
} else {
reqlist_init_req(&s->frozen_read_reqs, req, offset, bytes);
*file = bs->file;
}

return req;
}

static void cbw_snapshot_read_unlock(BlockDriverState *bs, BlockReq *req)
{
BDRVCopyBeforeWriteState *s = bs->opaque;

if (req->offset == -1 && req->bytes == -1) {
g_free(req);
return;
}

QEMU_LOCK_GUARD(&s->lock);

reqlist_remove_req(req);
g_free(req);
}

static coroutine_fn int
cbw_co_preadv_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset)
{
BlockReq *req;
BdrvChild *file;
int ret;

/* TODO: upgrade to async loop using AioTask */
while (bytes) {
int64_t cur_bytes;

req = cbw_snapshot_read_lock(bs, offset, bytes, &cur_bytes, &file);
if (!req) {
return -EACCES;
}

ret = bdrv_co_preadv_part(file, offset, cur_bytes,
qiov, qiov_offset, 0);
cbw_snapshot_read_unlock(bs, req);
if (ret < 0) {
return ret;
}

bytes -= cur_bytes;
offset += cur_bytes;
qiov_offset += cur_bytes;
}

return 0;
}

static int coroutine_fn
cbw_co_snapshot_block_status(BlockDriverState *bs,
bool want_zero, int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map,
BlockDriverState **file)
{
BDRVCopyBeforeWriteState *s = bs->opaque;
BlockReq *req;
int ret;
int64_t cur_bytes;
BdrvChild *child;

req = cbw_snapshot_read_lock(bs, offset, bytes, &cur_bytes, &child);
if (!req) {
return -EACCES;
}

ret = bdrv_block_status(child->bs, offset, cur_bytes, pnum, map, file);
if (child == s->target) {
/*
* We refer to s->target only for areas that we've written to it.
* And we can not report unallocated blocks in s->target: this will
* break generic block-status-above logic, that will go to
* copy-before-write filtered child in this case.
*/
assert(ret & BDRV_BLOCK_ALLOCATED);
}

cbw_snapshot_read_unlock(bs, req);

return ret;
}

static int coroutine_fn cbw_co_pdiscard_snapshot(BlockDriverState *bs,
int64_t offset, int64_t bytes)
{
BDRVCopyBeforeWriteState *s = bs->opaque;

WITH_QEMU_LOCK_GUARD(&s->lock) {
bdrv_reset_dirty_bitmap(s->access_bitmap, offset, bytes);
}

block_copy_reset(s->bcs, offset, bytes);

return bdrv_co_pdiscard(s->target, offset, bytes);
}

static void cbw_refresh_filename(BlockDriverState *bs)
{
pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
@ -145,11 +328,54 @@ static void cbw_child_perm(BlockDriverState *bs, BdrvChild *c,
}
}

static bool cbw_parse_bitmap_option(QDict *options, BdrvDirtyBitmap **bitmap,
Error **errp)
{
QDict *bitmap_qdict = NULL;
BlockDirtyBitmap *bmp_param = NULL;
Visitor *v = NULL;
bool ret = false;

*bitmap = NULL;

qdict_extract_subqdict(options, &bitmap_qdict, "bitmap.");
if (!qdict_size(bitmap_qdict)) {
ret = true;
goto out;
}

v = qobject_input_visitor_new_flat_confused(bitmap_qdict, errp);
if (!v) {
goto out;
}

visit_type_BlockDirtyBitmap(v, NULL, &bmp_param, errp);
if (!bmp_param) {
goto out;
}

*bitmap = block_dirty_bitmap_lookup(bmp_param->node, bmp_param->name, NULL,
errp);
if (!*bitmap) {
goto out;
}

ret = true;

out:
qapi_free_BlockDirtyBitmap(bmp_param);
visit_free(v);
qobject_unref(bitmap_qdict);

return ret;
}
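Note (not part of the commit): a hedged sketch of how the flat "bitmap.*" keys consumed by cbw_parse_bitmap_option() above could be supplied when the filter node is created; the node and bitmap names are made up for illustration.

/* Illustrative only: the filter options carry the bitmap as flat keys. */
QDict *opts = qdict_new();
qdict_put_str(opts, "driver", "copy-before-write");
qdict_put_str(opts, "bitmap.node", "drive0");   /* hypothetical node name */
qdict_put_str(opts, "bitmap.name", "bitmap0");  /* hypothetical bitmap name */
/*
 * cbw_open() -> cbw_parse_bitmap_option() extracts the "bitmap." sub-dict
 * and visits it as a BlockDirtyBitmap { node, name } QAPI object.
 */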

static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVCopyBeforeWriteState *s = bs->opaque;
BdrvDirtyBitmap *copy_bitmap;
BdrvDirtyBitmap *bitmap = NULL;
int64_t cluster_size;

bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
@ -164,6 +390,10 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
return -EINVAL;
}

if (!cbw_parse_bitmap_option(options, &bitmap, errp)) {
return -EINVAL;
}

bs->total_sectors = bs->file->bs->total_sectors;
bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED |
(BDRV_REQ_FUA & bs->file->bs->supported_write_flags);
@ -171,14 +401,32 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK) &
bs->file->bs->supported_zero_flags);

s->bcs = block_copy_state_new(bs->file, s->target, errp);
s->bcs = block_copy_state_new(bs->file, s->target, bitmap, errp);
if (!s->bcs) {
error_prepend(errp, "Cannot create block-copy-state: ");
return -EINVAL;
}

copy_bitmap = block_copy_dirty_bitmap(s->bcs);
bdrv_set_dirty_bitmap(copy_bitmap, 0, bdrv_dirty_bitmap_size(copy_bitmap));
cluster_size = block_copy_cluster_size(s->bcs);

s->done_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
if (!s->done_bitmap) {
return -EINVAL;
}
bdrv_disable_dirty_bitmap(s->done_bitmap);

/* s->access_bitmap starts equal to bcs bitmap */
s->access_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
if (!s->access_bitmap) {
return -EINVAL;
}
bdrv_disable_dirty_bitmap(s->access_bitmap);
bdrv_dirty_bitmap_merge_internal(s->access_bitmap,
block_copy_dirty_bitmap(s->bcs), NULL,
true);

qemu_co_mutex_init(&s->lock);
QLIST_INIT(&s->frozen_read_reqs);

return 0;
}
@ -187,6 +435,9 @@ static void cbw_close(BlockDriverState *bs)
{
BDRVCopyBeforeWriteState *s = bs->opaque;

bdrv_release_dirty_bitmap(s->access_bitmap);
bdrv_release_dirty_bitmap(s->done_bitmap);

block_copy_state_free(s->bcs);
s->bcs = NULL;
}
@ -204,6 +455,10 @@ BlockDriver bdrv_cbw_filter = {
.bdrv_co_pdiscard = cbw_co_pdiscard,
.bdrv_co_flush = cbw_co_flush,

.bdrv_co_preadv_snapshot = cbw_co_preadv_snapshot,
.bdrv_co_pdiscard_snapshot = cbw_co_pdiscard_snapshot,
.bdrv_co_snapshot_block_status = cbw_co_snapshot_block_status,

.bdrv_refresh_filename = cbw_refresh_filename,

.bdrv_child_perm = cbw_child_perm,
@ -223,6 +478,7 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
QDict *opts;

assert(source->total_sectors == target->total_sectors);
GLOBAL_STATE_CODE();

opts = qdict_new();
qdict_put_str(opts, "driver", "copy-before-write");
@ -245,6 +501,7 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source,

void bdrv_cbw_drop(BlockDriverState *bs)
{
GLOBAL_STATE_CODE();
bdrv_drop_filter(bs, &error_abort);
bdrv_unref(bs);
}

@ -29,6 +29,13 @@
#include "block/block_int.h"
#include "block/block-copy.h"

/*
* Global state (GS) API. These functions run under the BQL.
*
* See include/block/block-global-state.h for more information about
* the GS API.
*/

BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
BlockDriverState *target,
const char *filter_node_name,
@ -30,17 +30,17 @@
/* For blk_bs() in generated block/block-gen.c */
#include "sysemu/block-backend.h"

/*
* I/O API functions. These functions are thread-safe.
*
* See include/block/block-io.h for more information about
* the I/O API.
*/

int coroutine_fn bdrv_co_check(BlockDriverState *bs,
BdrvCheckResult *res, BdrvCheckMode fix);
int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp);

int generated_co_wrapper
bdrv_preadv(BdrvChild *child, int64_t offset, unsigned int bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags);
int generated_co_wrapper
bdrv_pwritev(BdrvChild *child, int64_t offset, unsigned int bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags);

int coroutine_fn
bdrv_co_common_block_status_above(BlockDriverState *bs,
BlockDriverState *base,
@ -52,57 +52,81 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
int64_t *map,
BlockDriverState **file,
int *depth);
int generated_co_wrapper
bdrv_common_block_status_above(BlockDriverState *bs,
BlockDriverState *base,
bool include_base,
bool want_zero,
int64_t offset,
int64_t bytes,
int64_t *pnum,
int64_t *map,
BlockDriverState **file,
int *depth);

int coroutine_fn bdrv_co_readv_vmstate(BlockDriverState *bs,
QEMUIOVector *qiov, int64_t pos);
int coroutine_fn bdrv_co_writev_vmstate(BlockDriverState *bs,
QEMUIOVector *qiov, int64_t pos);

int generated_co_wrapper
nbd_do_establish_connection(BlockDriverState *bs, Error **errp);
int coroutine_fn
nbd_co_do_establish_connection(BlockDriverState *bs, Error **errp);
nbd_co_do_establish_connection(BlockDriverState *bs, bool blocking,
Error **errp);


int generated_co_wrapper
blk_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags);
int coroutine_fn
blk_co_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags);


int generated_co_wrapper
blk_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags);
int coroutine_fn
blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags);

int generated_co_wrapper
blk_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
int coroutine_fn
blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf);

int generated_co_wrapper
blk_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes);
int coroutine_fn
blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes);

int generated_co_wrapper blk_do_flush(BlockBackend *blk);
int coroutine_fn blk_co_do_flush(BlockBackend *blk);


/*
* "I/O or GS" API functions. These functions can run without
* the BQL, but only in one specific iothread/main loop.
*
* See include/block/block-io.h for more information about
* the "I/O or GS" API.
*/

int generated_co_wrapper
bdrv_preadv(BdrvChild *child, int64_t offset, unsigned int bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags);

int generated_co_wrapper
bdrv_pwritev(BdrvChild *child, int64_t offset, unsigned int bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags);

int generated_co_wrapper
bdrv_common_block_status_above(BlockDriverState *bs,
BlockDriverState *base,
bool include_base,
bool want_zero,
int64_t offset,
int64_t bytes,
int64_t *pnum,
int64_t *map,
BlockDriverState **file,
int *depth);
int generated_co_wrapper
nbd_do_establish_connection(BlockDriverState *bs, bool blocking, Error **errp);

int generated_co_wrapper
blk_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags);

int generated_co_wrapper
blk_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags);

int generated_co_wrapper
blk_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf);

int generated_co_wrapper
blk_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes);

int generated_co_wrapper blk_do_flush(BlockBackend *blk);

#endif /* BLOCK_COROUTINES_INT_H */
@ -42,6 +42,8 @@ static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
int ret;

GLOBAL_STATE_CODE();

job_progress_set_remaining(&s->common, 1);
ret = s->drv->bdrv_co_create(s->opts, errp);
job_progress_update(&s->common, 1);

@ -31,6 +31,7 @@
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "crypto.h"

typedef struct BlockCrypto BlockCrypto;
@ -777,6 +778,37 @@ block_crypto_get_specific_info_luks(BlockDriverState *bs, Error **errp)
return spec_info;
}

static int
block_crypto_amend_prepare(BlockDriverState *bs, Error **errp)
{
BlockCrypto *crypto = bs->opaque;
int ret;

/* apply for exclusive read/write permissions to the underlying file */
crypto->updating_keys = true;
ret = bdrv_child_refresh_perms(bs, bs->file, errp);
if (ret < 0) {
/* Well, in this case we will not be updating any keys */
crypto->updating_keys = false;
}
return ret;
}

static void
block_crypto_amend_cleanup(BlockDriverState *bs)
{
BlockCrypto *crypto = bs->opaque;
Error *errp = NULL;

/* release exclusive read/write permissions to the underlying file */
crypto->updating_keys = false;
bdrv_child_refresh_perms(bs, bs->file, &errp);

if (errp) {
error_report_err(errp);
}
}

static int
block_crypto_amend_options_generic_luks(BlockDriverState *bs,
QCryptoBlockAmendOptions *amend_options,
@ -784,30 +816,17 @@ block_crypto_amend_options_generic_luks(BlockDriverState *bs,
Error **errp)
{
BlockCrypto *crypto = bs->opaque;
int ret;

assert(crypto);
assert(crypto->block);

/* apply for exclusive read/write permissions to the underlying file*/
crypto->updating_keys = true;
ret = bdrv_child_refresh_perms(bs, bs->file, errp);
if (ret) {
goto cleanup;
}

ret = qcrypto_block_amend_options(crypto->block,
return qcrypto_block_amend_options(crypto->block,
block_crypto_read_func,
block_crypto_write_func,
bs,
amend_options,
force,
errp);
cleanup:
/* release exclusive read/write permissions to the underlying file*/
crypto->updating_keys = false;
bdrv_child_refresh_perms(bs, bs->file, errp);
return ret;
}

static int
@ -833,8 +852,16 @@ block_crypto_amend_options_luks(BlockDriverState *bs,
if (!amend_options) {
goto cleanup;
}

ret = block_crypto_amend_prepare(bs, errp);
if (ret) {
goto perm_cleanup;
}
ret = block_crypto_amend_options_generic_luks(bs, amend_options,
force, errp);

perm_cleanup:
block_crypto_amend_cleanup(bs);
cleanup:
qapi_free_QCryptoBlockAmendOptions(amend_options);
return ret;
@ -931,6 +958,8 @@ static BlockDriver bdrv_crypto_luks = {
.bdrv_get_specific_info = block_crypto_get_specific_info_luks,
.bdrv_amend_options = block_crypto_amend_options_luks,
.bdrv_co_amend = block_crypto_co_amend_luks,
.bdrv_amend_pre_run = block_crypto_amend_prepare,
.bdrv_amend_clean = block_crypto_amend_cleanup,

.is_format = true,
block/curl.c
@ -458,38 +458,51 @@ static int curl_init_state(BDRVCURLState *s, CURLState *state)
if (!state->curl) {
return -EIO;
}
curl_easy_setopt(state->curl, CURLOPT_URL, s->url);
if (curl_easy_setopt(state->curl, CURLOPT_URL, s->url) ||
curl_easy_setopt(state->curl, CURLOPT_SSL_VERIFYPEER,
(long) s->sslverify);
(long) s->sslverify) ||
curl_easy_setopt(state->curl, CURLOPT_SSL_VERIFYHOST,
s->sslverify ? 2L : 0L);
if (s->cookie) {
curl_easy_setopt(state->curl, CURLOPT_COOKIE, s->cookie);
s->sslverify ? 2L : 0L)) {
goto err;
}
curl_easy_setopt(state->curl, CURLOPT_TIMEOUT, (long)s->timeout);
if (s->cookie) {
if (curl_easy_setopt(state->curl, CURLOPT_COOKIE, s->cookie)) {
goto err;
}
}
if (curl_easy_setopt(state->curl, CURLOPT_TIMEOUT, (long)s->timeout) ||
curl_easy_setopt(state->curl, CURLOPT_WRITEFUNCTION,
(void *)curl_read_cb);
curl_easy_setopt(state->curl, CURLOPT_WRITEDATA, (void *)state);
curl_easy_setopt(state->curl, CURLOPT_PRIVATE, (void *)state);
curl_easy_setopt(state->curl, CURLOPT_AUTOREFERER, 1);
curl_easy_setopt(state->curl, CURLOPT_FOLLOWLOCATION, 1);
curl_easy_setopt(state->curl, CURLOPT_NOSIGNAL, 1);
curl_easy_setopt(state->curl, CURLOPT_ERRORBUFFER, state->errmsg);
curl_easy_setopt(state->curl, CURLOPT_FAILONERROR, 1);

(void *)curl_read_cb) ||
curl_easy_setopt(state->curl, CURLOPT_WRITEDATA, (void *)state) ||
curl_easy_setopt(state->curl, CURLOPT_PRIVATE, (void *)state) ||
curl_easy_setopt(state->curl, CURLOPT_AUTOREFERER, 1) ||
curl_easy_setopt(state->curl, CURLOPT_FOLLOWLOCATION, 1) ||
curl_easy_setopt(state->curl, CURLOPT_NOSIGNAL, 1) ||
curl_easy_setopt(state->curl, CURLOPT_ERRORBUFFER, state->errmsg) ||
curl_easy_setopt(state->curl, CURLOPT_FAILONERROR, 1)) {
goto err;
}
if (s->username) {
curl_easy_setopt(state->curl, CURLOPT_USERNAME, s->username);
if (curl_easy_setopt(state->curl, CURLOPT_USERNAME, s->username)) {
goto err;
}
}
if (s->password) {
curl_easy_setopt(state->curl, CURLOPT_PASSWORD, s->password);
if (curl_easy_setopt(state->curl, CURLOPT_PASSWORD, s->password)) {
goto err;
}
}
if (s->proxyusername) {
curl_easy_setopt(state->curl,
CURLOPT_PROXYUSERNAME, s->proxyusername);
if (curl_easy_setopt(state->curl,
CURLOPT_PROXYUSERNAME, s->proxyusername)) {
goto err;
}
}
if (s->proxypassword) {
curl_easy_setopt(state->curl,
CURLOPT_PROXYPASSWORD, s->proxypassword);
if (curl_easy_setopt(state->curl,
CURLOPT_PROXYPASSWORD, s->proxypassword)) {
goto err;
}
}

/* Restrict supported protocols to avoid security issues in the more
@ -499,18 +512,27 @@ static int curl_init_state(BDRVCURLState *s, CURLState *state)
* Restricting protocols is only supported from 7.19.4 upwards.
*/
#if LIBCURL_VERSION_NUM >= 0x071304
curl_easy_setopt(state->curl, CURLOPT_PROTOCOLS, PROTOCOLS);
curl_easy_setopt(state->curl, CURLOPT_REDIR_PROTOCOLS, PROTOCOLS);
if (curl_easy_setopt(state->curl, CURLOPT_PROTOCOLS, PROTOCOLS) ||
curl_easy_setopt(state->curl, CURLOPT_REDIR_PROTOCOLS, PROTOCOLS)) {
goto err;
}
#endif

#ifdef DEBUG_VERBOSE
curl_easy_setopt(state->curl, CURLOPT_VERBOSE, 1);
if (curl_easy_setopt(state->curl, CURLOPT_VERBOSE, 1)) {
goto err;
}
#endif
}

state->s = s;

return 0;

err:
curl_easy_cleanup(state->curl);
state->curl = NULL;
return -EIO;
}
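Note (not part of the commit): the change above makes curl_init_state() fail on the first curl_easy_setopt() error instead of ignoring return codes; a minimal standalone sketch of the same pattern, using only libcurl's public API, with illustrative names:

#include <curl/curl.h>

/* Illustrative only: fail fast when any option cannot be applied. */
static CURL *open_checked_handle(const char *url)
{
    CURL *curl = curl_easy_init();
    if (!curl) {
        return NULL;
    }
    if (curl_easy_setopt(curl, CURLOPT_URL, url) ||
        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L) ||
        curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1L)) {
        /* Any non-CURLE_OK return code aborts the setup. */
        curl_easy_cleanup(curl);
        return NULL;
    }
    return curl;
}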

/* Called with s->mutex held. */
@ -759,14 +781,19 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
// Get file size

if (curl_init_state(s, state) < 0) {
pstrcpy(state->errmsg, CURL_ERROR_SIZE,
"curl library initialization failed.");
goto out;
}

s->accept_range = false;
curl_easy_setopt(state->curl, CURLOPT_NOBODY, 1);
curl_easy_setopt(state->curl, CURLOPT_HEADERFUNCTION,
curl_header_cb);
curl_easy_setopt(state->curl, CURLOPT_HEADERDATA, s);
if (curl_easy_setopt(state->curl, CURLOPT_NOBODY, 1) ||
curl_easy_setopt(state->curl, CURLOPT_HEADERFUNCTION, curl_header_cb) ||
curl_easy_setopt(state->curl, CURLOPT_HEADERDATA, s)) {
pstrcpy(state->errmsg, CURL_ERROR_SIZE,
"curl library initialization failed.");
goto out;
}
if (curl_easy_perform(state->curl))
goto out;
if (curl_easy_getinfo(state->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &d)) {
@ -879,9 +906,8 @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)

snprintf(state->range, 127, "%" PRIu64 "-%" PRIu64, start, end);
trace_curl_setup_preadv(acb->bytes, start, state->range);
curl_easy_setopt(state->curl, CURLOPT_RANGE, state->range);

if (curl_multi_add_handle(s->multi, state->curl) != CURLM_OK) {
if (curl_easy_setopt(state->curl, CURLOPT_RANGE, state->range) ||
curl_multi_add_handle(s->multi, state->curl) != CURLM_OK) {
state->acb[0] = NULL;
acb->ret = -EIO;
@ -496,6 +496,7 @@ static void coroutine_fn bdrv_co_can_store_new_dirty_bitmap_entry(void *opaque)
bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
uint32_t granularity, Error **errp)
{
IO_CODE();
if (qemu_in_coroutine()) {
return bdrv_co_can_store_new_dirty_bitmap(bs, name, granularity, errp);
} else {
@ -656,6 +657,7 @@ void bdrv_reset_dirty_bitmap(BdrvDirtyBitmap *bitmap,

void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out)
{
IO_CODE();
assert(!bdrv_dirty_bitmap_readonly(bitmap));
bdrv_dirty_bitmaps_lock(bitmap->bs);
if (!out) {
@ -673,6 +675,7 @@ void bdrv_restore_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *backup)
{
HBitmap *tmp = bitmap->bitmap;
assert(!bdrv_dirty_bitmap_readonly(bitmap));
GLOBAL_STATE_CODE();
bitmap->bitmap = backup;
hbitmap_free(tmp);
}
@ -737,6 +740,7 @@ void bdrv_dirty_bitmap_deserialize_finish(BdrvDirtyBitmap *bitmap)
void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
BdrvDirtyBitmap *bitmap;
IO_CODE();

if (QLIST_EMPTY(&bs->dirty_bitmaps)) {
return;
@ -875,16 +879,25 @@ bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap,
dirty_start, dirty_count);
}

bool bdrv_dirty_bitmap_status(BdrvDirtyBitmap *bitmap, int64_t offset,
int64_t bytes, int64_t *count)
{
return hbitmap_status(bitmap->bitmap, offset, bytes, count);
}
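Note (not part of the commit): a hedged sketch of the contract of the new bdrv_dirty_bitmap_status() helper as the copy-before-write filter uses it; the return value says whether the bytes at @offset are dirty, and *count is set to the length of that uniform run (clamped to @bytes). The walk_status() name is illustrative only.

/* Illustrative only: split [offset, offset + bytes) into uniform runs. */
static void walk_status(BdrvDirtyBitmap *bitmap, int64_t offset, int64_t bytes)
{
    while (bytes > 0) {
        int64_t count;
        bool dirty = bdrv_dirty_bitmap_status(bitmap, offset, bytes, &count);

        /* 'count' bytes starting at 'offset' share the same state. */
        printf("%s run: offset=%" PRId64 " bytes=%" PRId64 "\n",
               dirty ? "dirty" : "clean", offset, count);

        offset += count;
        bytes -= count;
    }
}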

/**
* bdrv_merge_dirty_bitmap: merge src into dest.
* Ensures permissions on bitmaps are reasonable; use for public API.
*
* @backup: If provided, make a copy of dest here prior to merge.
*
* Returns true on success, false on failure. In case of failure bitmaps are
* untouched.
*/
void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
bool bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
HBitmap **backup, Error **errp)
{
bool ret;
bool ret = false;

bdrv_dirty_bitmaps_lock(dest->bs);
if (src->bs != dest->bs) {
@ -912,6 +925,8 @@ out:
if (src->bs != dest->bs) {
bdrv_dirty_bitmaps_unlock(src->bs);
}

return ret;
}
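Note (not part of the commit): with the boolean return, callers can branch directly instead of round-tripping through a local Error (the monitor hunk later in this commit does exactly that); a hedged fragment, variable names illustrative:

/* Illustrative only: the boolean return lets callers branch directly. */
if (!bdrv_merge_dirty_bitmap(dst, src, NULL, errp)) {
    return;   /* errp already set; bitmaps left untouched */
}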

/**
@ -928,6 +943,7 @@ bool bdrv_dirty_bitmap_merge_internal(BdrvDirtyBitmap *dest,
bool lock)
{
bool ret;
IO_CODE();

assert(!bdrv_dirty_bitmap_readonly(dest));
assert(!bdrv_dirty_bitmap_inconsistent(dest));
@ -27,6 +27,7 @@
#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/memalign.h"
#include "dmg.h"

int (*dmg_uncompress_bz2)(char *next_in, unsigned int avail_in,

@ -139,7 +139,7 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
* access since the export could be available before migration handover.
* ctx was acquired in the caller.
*/
bdrv_invalidate_cache(bs, NULL);
bdrv_activate(bs, NULL);

perm = BLK_PERM_CONSISTENT_READ;
if (export->writable) {

@ -19,6 +19,7 @@
#define FUSE_USE_VERSION 31

#include "qemu/osdep.h"
#include "qemu/memalign.h"
#include "block/aio.h"
#include "block/block.h"
#include "block/export.h"
@ -86,8 +87,8 @@ static int fuse_export_create(BlockExport *blk_exp,

assert(blk_exp_args->type == BLOCK_EXPORT_TYPE_FUSE);

/* For growable exports, take the RESIZE permission */
if (args->growable) {
/* For growable and writable exports, take the RESIZE permission */
if (args->growable || blk_exp_args->writable) {
uint64_t blk_perm, blk_shared_perm;

blk_get_perm(exp->common.blk, &blk_perm, &blk_shared_perm);
@ -392,14 +393,23 @@ static int fuse_do_truncate(const FuseExport *exp, int64_t size,
{
uint64_t blk_perm, blk_shared_perm;
BdrvRequestFlags truncate_flags = 0;
int ret;
bool add_resize_perm;
int ret, ret_check;

/* Growable and writable exports have a permanent RESIZE permission */
add_resize_perm = !exp->growable && !exp->writable;

if (req_zero_write) {
truncate_flags |= BDRV_REQ_ZERO_WRITE;
}

/* Growable exports have a permanent RESIZE permission */
if (!exp->growable) {
if (add_resize_perm) {

if (!qemu_in_main_thread()) {
/* Changing permissions like below only works in the main thread */
return -EPERM;
}

blk_get_perm(exp->common.blk, &blk_perm, &blk_shared_perm);

ret = blk_set_perm(exp->common.blk, blk_perm | BLK_PERM_RESIZE,
@ -412,9 +422,11 @@ static int fuse_do_truncate(const FuseExport *exp, int64_t size,
ret = blk_truncate(exp->common.blk, size, true, prealloc,
truncate_flags, NULL);

if (!exp->growable) {
if (add_resize_perm) {
/* Must succeed, because we are only giving up the RESIZE permission */
blk_set_perm(exp->common.blk, blk_perm, blk_shared_perm, &error_abort);
ret_check = blk_set_perm(exp->common.blk, blk_perm,
blk_shared_perm, &error_abort);
assert(ret_check == 0);
}

return ret;
@ -23,7 +23,6 @@
*/

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
@ -31,6 +30,7 @@
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/units.h"
#include "qemu/memalign.h"
#include "trace.h"
#include "block/thread-pool.h"
#include "qemu/iov.h"
@ -385,7 +385,7 @@ static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
{
BDRVRawState *s = bs->opaque;
char *buf;
size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size);
size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size());
size_t alignments[] = {1, 512, 1024, 2048, 4096};

/* For SCSI generic devices the alignment is not really used.
@ -1022,6 +1022,21 @@ static int raw_handle_perm_lock(BlockDriverState *bs,
return ret;
}

/* Sets a specific flag */
static int fcntl_setfl(int fd, int flag)
{
int flags;

flags = fcntl(fd, F_GETFL);
if (flags == -1) {
return -errno;
}
if (fcntl(fd, F_SETFL, flags | flag) == -1) {
return -errno;
}
return 0;
}
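Note (not part of the commit): fcntl_setfl() above ORs one status flag into an already-open descriptor; a hedged usage sketch, with O_NONBLOCK chosen purely as an example:

/* Illustrative only: switch an already-open fd to non-blocking mode. */
int ret = fcntl_setfl(fd, O_NONBLOCK);
if (ret < 0) {
    error_setg_errno(errp, -ret, "Could not set O_NONBLOCK");
}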

static int raw_reconfigure_getfd(BlockDriverState *bs, int flags,
int *open_flags, uint64_t perm, bool force_dup,
Error **errp)
@ -1260,7 +1275,7 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
raw_probe_alignment(bs, s->fd, errp);

bs->bl.min_mem_alignment = s->buf_align;
bs->bl.opt_mem_alignment = MAX(s->buf_align, qemu_real_host_page_size);
bs->bl.opt_mem_alignment = MAX(s->buf_align, qemu_real_host_page_size());

/*
* Maximum transfers are best effort, so it is okay to ignore any
@ -1885,7 +1900,7 @@ static int allocate_first_block(int fd, size_t max_size)
size_t write_size = (max_size < MAX_BLOCKSIZE)
? BDRV_SECTOR_SIZE
: MAX_BLOCKSIZE;
size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size);
size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size());
void *buf;
ssize_t n;
int ret;
@ -3319,17 +3334,23 @@ BlockDriver bdrv_file = {
#if defined(__APPLE__) && defined(__MACH__)
static kern_return_t GetBSDPath(io_iterator_t mediaIterator, char *bsdPath,
CFIndex maxPathSize, int flags);

#if !defined(MAC_OS_VERSION_12_0) \
|| (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_12_0)
#define IOMainPort IOMasterPort
#endif

static char *FindEjectableOpticalMedia(io_iterator_t *mediaIterator)
{
kern_return_t kernResult = KERN_FAILURE;
mach_port_t masterPort;
mach_port_t mainPort;
CFMutableDictionaryRef classesToMatch;
const char *matching_array[] = {kIODVDMediaClass, kIOCDMediaClass};
char *mediaType = NULL;

kernResult = IOMasterPort( MACH_PORT_NULL, &masterPort );
kernResult = IOMainPort(MACH_PORT_NULL, &mainPort);
if ( KERN_SUCCESS != kernResult ) {
printf( "IOMasterPort returned %d\n", kernResult );
printf("IOMainPort returned %d\n", kernResult);
}

int index;
@ -3342,7 +3363,7 @@ static char *FindEjectableOpticalMedia(io_iterator_t *mediaIterator)
}
CFDictionarySetValue(classesToMatch, CFSTR(kIOMediaEjectableKey),
kCFBooleanTrue);
kernResult = IOServiceGetMatchingServices(masterPort, classesToMatch,
kernResult = IOServiceGetMatchingServices(mainPort, classesToMatch,
mediaIterator);
if (kernResult != KERN_SUCCESS) {
error_report("Note: IOServiceGetMatchingServices returned %d",
154
block/io.c
154
block/io.c
@ -32,6 +32,7 @@
|
||||
#include "block/coroutines.h"
|
||||
#include "block/write-threshold.h"
|
||||
#include "qemu/cutils.h"
|
||||
#include "qemu/memalign.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/main-loop.h"
|
||||
@ -70,6 +71,7 @@ static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
|
||||
void bdrv_parent_drained_end_single(BdrvChild *c)
|
||||
{
|
||||
int drained_end_counter = 0;
|
||||
IO_OR_GS_CODE();
|
||||
bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
|
||||
BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0);
|
||||
}
|
||||
@ -114,6 +116,7 @@ static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
|
||||
|
||||
void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
|
||||
{
|
||||
IO_OR_GS_CODE();
|
||||
c->parent_quiesce_counter++;
|
||||
if (c->klass->drained_begin) {
|
||||
c->klass->drained_begin(c);
|
||||
@ -164,6 +167,8 @@ void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
|
||||
BdrvChild *c;
|
||||
bool have_limits;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
if (tran) {
|
||||
BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
|
||||
*s = (BdrvRefreshLimitsState) {
|
||||
@ -189,10 +194,6 @@ void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
|
||||
QLIST_FOREACH(c, &bs->children, next) {
|
||||
if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
|
||||
{
|
||||
bdrv_refresh_limits(c->bs, tran, errp);
|
||||
if (*errp) {
|
||||
return;
|
||||
}
|
||||
bdrv_merge_limits(&bs->bl, &c->bs->bl);
|
||||
have_limits = true;
|
||||
}
|
||||
@ -200,7 +201,7 @@ void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
|
||||
|
||||
if (!have_limits) {
|
||||
bs->bl.min_mem_alignment = 512;
|
||||
bs->bl.opt_mem_alignment = qemu_real_host_page_size;
|
||||
bs->bl.opt_mem_alignment = qemu_real_host_page_size();
|
||||
|
||||
/* Safe default since most protocols use readv()/writev()/etc */
|
||||
bs->bl.max_iov = IOV_MAX;
|
||||
@ -226,12 +227,14 @@ void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
|
||||
*/
|
||||
void bdrv_enable_copy_on_read(BlockDriverState *bs)
|
||||
{
|
||||
IO_CODE();
|
||||
qatomic_inc(&bs->copy_on_read);
|
||||
}
|
||||
|
||||
void bdrv_disable_copy_on_read(BlockDriverState *bs)
|
||||
{
|
||||
int old = qatomic_fetch_dec(&bs->copy_on_read);
|
||||
IO_CODE();
|
||||
assert(old >= 1);
|
||||
}
|
||||
|
||||
@ -303,6 +306,7 @@ bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
|
||||
BdrvChild *ignore_parent, bool ignore_bds_parents)
|
||||
{
|
||||
BdrvChild *child, *next;
|
||||
IO_OR_GS_CODE();
|
||||
|
||||
if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
|
||||
return true;
|
||||
@ -426,6 +430,7 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
|
||||
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
|
||||
BdrvChild *parent, bool ignore_bds_parents)
|
||||
{
|
||||
IO_OR_GS_CODE();
|
||||
assert(!qemu_in_coroutine());
|
||||
|
||||
/* Stop things in parent-to-child order */
|
||||
@ -477,11 +482,13 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
|
||||
|
||||
void bdrv_drained_begin(BlockDriverState *bs)
|
||||
{
|
||||
IO_OR_GS_CODE();
|
||||
bdrv_do_drained_begin(bs, false, NULL, false, true);
|
||||
}
|
||||
|
||||
void bdrv_subtree_drained_begin(BlockDriverState *bs)
|
||||
{
|
||||
IO_OR_GS_CODE();
|
||||
bdrv_do_drained_begin(bs, true, NULL, false, true);
|
||||
}
|
||||
|
||||
@ -538,18 +545,21 @@ static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
|
||||
void bdrv_drained_end(BlockDriverState *bs)
|
||||
{
|
||||
int drained_end_counter = 0;
|
||||
IO_OR_GS_CODE();
|
||||
bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
|
||||
BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
|
||||
}
|
||||
|
||||
void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
|
||||
{
|
||||
IO_CODE();
|
||||
bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
|
||||
}
|
||||
|
||||
void bdrv_subtree_drained_end(BlockDriverState *bs)
|
||||
{
|
||||
int drained_end_counter = 0;
|
||||
IO_OR_GS_CODE();
|
||||
bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
|
||||
BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
|
||||
}
|
||||
@ -557,6 +567,7 @@ void bdrv_subtree_drained_end(BlockDriverState *bs)
|
||||
void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
|
||||
{
|
||||
int i;
|
||||
IO_OR_GS_CODE();
|
||||
|
||||
for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
|
||||
bdrv_do_drained_begin(child->bs, true, child, false, true);
|
||||
@ -567,6 +578,7 @@ void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
|
||||
{
|
||||
int drained_end_counter = 0;
|
||||
int i;
|
||||
IO_OR_GS_CODE();
|
||||
|
||||
for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
|
||||
bdrv_do_drained_end(child->bs, true, child, false,
|
||||
@ -585,6 +597,7 @@ void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
|
||||
*/
|
||||
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
|
||||
{
|
||||
IO_OR_GS_CODE();
|
||||
assert(qemu_in_coroutine());
|
||||
bdrv_drained_begin(bs);
|
||||
bdrv_drained_end(bs);
|
||||
@ -592,6 +605,7 @@ void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
|
||||
|
||||
void bdrv_drain(BlockDriverState *bs)
|
||||
{
|
||||
IO_OR_GS_CODE();
|
||||
bdrv_drained_begin(bs);
|
||||
bdrv_drained_end(bs);
|
||||
}
|
||||
@ -612,6 +626,7 @@ static bool bdrv_drain_all_poll(void)
|
||||
{
|
||||
BlockDriverState *bs = NULL;
|
||||
bool result = false;
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
/* bdrv_drain_poll() can't make changes to the graph and we are holding the
|
||||
* main AioContext lock, so iterating bdrv_next_all_states() is safe. */
|
||||
@ -640,6 +655,7 @@ static bool bdrv_drain_all_poll(void)
|
||||
void bdrv_drain_all_begin(void)
|
||||
{
|
||||
BlockDriverState *bs = NULL;
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
if (qemu_in_coroutine()) {
|
||||
bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
|
||||
@ -682,6 +698,7 @@ void bdrv_drain_all_begin(void)
|
||||
void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
|
||||
{
|
||||
int drained_end_counter = 0;
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
g_assert(bs->quiesce_counter > 0);
|
||||
g_assert(!bs->refcnt);
|
||||
@ -696,6 +713,7 @@ void bdrv_drain_all_end(void)
|
||||
{
|
||||
BlockDriverState *bs = NULL;
|
||||
int drained_end_counter = 0;
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
/*
|
||||
* bdrv queue is managed by record/replay,
|
||||
@ -723,6 +741,7 @@ void bdrv_drain_all_end(void)
|
||||
|
||||
void bdrv_drain_all(void)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
bdrv_drain_all_begin();
|
||||
bdrv_drain_all_end();
|
||||
}
|
||||
@ -867,6 +886,7 @@ BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
|
||||
{
|
||||
BdrvTrackedRequest *req;
|
||||
Coroutine *self = qemu_coroutine_self();
|
||||
IO_CODE();
|
||||
|
||||
QLIST_FOREACH(req, &bs->tracked_requests, list) {
|
||||
if (req->co == self) {
|
||||
@ -886,7 +906,7 @@ void bdrv_round_to_clusters(BlockDriverState *bs,
|
||||
int64_t *cluster_bytes)
|
||||
{
|
||||
BlockDriverInfo bdi;
|
||||
|
||||
IO_CODE();
|
||||
if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
|
||||
*cluster_offset = offset;
|
||||
*cluster_bytes = bytes;
|
||||
@ -912,16 +932,19 @@ static int bdrv_get_cluster_size(BlockDriverState *bs)
|
||||
|
||||
void bdrv_inc_in_flight(BlockDriverState *bs)
|
||||
{
|
||||
IO_CODE();
|
||||
qatomic_inc(&bs->in_flight);
|
||||
}
|
||||
|
||||
void bdrv_wakeup(BlockDriverState *bs)
|
||||
{
|
||||
IO_CODE();
|
||||
aio_wait_kick();
|
||||
}
|
||||
|
||||
void bdrv_dec_in_flight(BlockDriverState *bs)
|
||||
{
|
||||
IO_CODE();
|
||||
qatomic_dec(&bs->in_flight);
|
||||
bdrv_wakeup(bs);
|
||||
}
|
||||
@ -946,6 +969,7 @@ bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
|
||||
uint64_t align)
|
||||
{
|
||||
bool waited;
|
||||
IO_CODE();
|
||||
|
||||
qemu_co_mutex_lock(&req->bs->reqs_lock);
|
||||
|
||||
@ -1040,6 +1064,7 @@ static int bdrv_check_request32(int64_t offset, int64_t bytes,
|
||||
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
|
||||
int64_t bytes, BdrvRequestFlags flags)
|
||||
{
|
||||
IO_CODE();
|
||||
return bdrv_pwritev(child, offset, bytes, NULL,
|
||||
BDRV_REQ_ZERO_WRITE | flags);
|
||||
}
|
||||
@ -1058,6 +1083,7 @@ int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
|
||||
int ret;
|
||||
int64_t target_size, bytes, offset = 0;
|
||||
BlockDriverState *bs = child->bs;
|
||||
IO_CODE();
|
||||
|
||||
target_size = bdrv_getlength(bs);
|
||||
if (target_size < 0) {
|
||||
@ -1090,6 +1116,7 @@ int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int64_t bytes)
|
||||
{
|
||||
int ret;
|
||||
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
|
||||
IO_CODE();
|
||||
|
||||
if (bytes < 0) {
|
||||
return -EINVAL;
|
||||
@ -1111,6 +1138,7 @@ int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf,
|
||||
{
|
||||
int ret;
|
||||
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
|
||||
IO_CODE();
|
||||
|
||||
if (bytes < 0) {
|
||||
return -EINVAL;
|
||||
@ -1131,6 +1159,7 @@ int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
|
||||
const void *buf, int64_t count)
|
||||
{
|
||||
int ret;
|
||||
IO_CODE();
|
||||
|
||||
ret = bdrv_pwrite(child, offset, buf, count);
|
||||
if (ret < 0) {
|
||||
@ -1797,6 +1826,7 @@ int coroutine_fn bdrv_co_preadv(BdrvChild *child,
|
||||
int64_t offset, int64_t bytes, QEMUIOVector *qiov,
|
||||
BdrvRequestFlags flags)
|
||||
{
|
||||
IO_CODE();
|
||||
return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
|
||||
}
|
||||
|
||||
@ -1809,6 +1839,7 @@ int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
|
||||
BdrvTrackedRequest req;
|
||||
BdrvRequestPadding pad;
|
||||
int ret;
|
||||
IO_CODE();
|
||||
|
||||
trace_bdrv_co_preadv_part(bs, offset, bytes, flags);
|
||||
|
||||
@ -2173,6 +2204,7 @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
|
||||
|
||||
padding = bdrv_init_padding(bs, offset, bytes, &pad);
|
||||
if (padding) {
|
||||
assert(!(flags & BDRV_REQ_NO_WAIT));
|
||||
bdrv_make_request_serialising(req, align);
|
||||
|
||||
bdrv_padding_rmw_read(child, req, &pad, true);
|
||||
@ -2230,6 +2262,7 @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
|
||||
int64_t offset, int64_t bytes, QEMUIOVector *qiov,
|
||||
BdrvRequestFlags flags)
|
||||
{
|
||||
IO_CODE();
|
||||
return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
|
||||
}
|
||||
|
||||
@ -2243,6 +2276,7 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
|
||||
BdrvRequestPadding pad;
|
||||
int ret;
|
||||
bool padded = false;
|
||||
IO_CODE();
|
||||
|
||||
trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);
|
||||
|
||||
@ -2307,6 +2341,7 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
|
||||
* serialize the request to prevent interactions of the
|
||||
* widened region with other transactions.
|
||||
*/
|
||||
assert(!(flags & BDRV_REQ_NO_WAIT));
|
||||
bdrv_make_request_serialising(&req, align);
|
||||
bdrv_padding_rmw_read(child, &req, &pad, false);
|
||||
}
|
||||
@ -2326,6 +2361,7 @@ out:
|
||||
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
|
||||
int64_t bytes, BdrvRequestFlags flags)
|
||||
{
|
||||
IO_CODE();
|
||||
trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
|
||||
|
||||
if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
|
||||
@ -2345,6 +2381,8 @@ int bdrv_flush_all(void)
|
||||
BlockDriverState *bs = NULL;
|
||||
int result = 0;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
/*
|
||||
* bdrv queue is managed by record/replay,
|
||||
* creating new flush request for stopping
|
||||
@ -2639,6 +2677,7 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
|
||||
BlockDriverState *p;
|
||||
int64_t eof = 0;
|
||||
int dummy;
|
||||
IO_CODE();
|
||||
|
||||
assert(!include_base || base); /* Can't include NULL base */
|
||||
|
||||
@ -2728,6 +2767,7 @@ int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
|
||||
int64_t offset, int64_t bytes, int64_t *pnum,
|
||||
int64_t *map, BlockDriverState **file)
|
||||
{
|
||||
IO_CODE();
|
||||
return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
|
||||
pnum, map, file, NULL);
|
||||
}
|
||||
@ -2735,6 +2775,7 @@ int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
|
||||
int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
int64_t *pnum, int64_t *map, BlockDriverState **file)
|
||||
{
|
||||
IO_CODE();
|
||||
return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
|
||||
offset, bytes, pnum, map, file);
|
||||
}
|
||||
@ -2751,6 +2792,7 @@ int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
|
||||
{
|
||||
int ret;
|
||||
int64_t pnum = bytes;
|
||||
IO_CODE();
|
||||
|
||||
if (!bytes) {
|
||||
return 1;
|
||||
@ -2771,6 +2813,7 @@ int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
|
||||
{
|
||||
int ret;
|
||||
int64_t dummy;
|
||||
IO_CODE();
|
||||
|
||||
ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
|
||||
bytes, pnum ? pnum : &dummy, NULL,
|
||||
@ -2807,6 +2850,7 @@ int bdrv_is_allocated_above(BlockDriverState *top,
|
||||
int ret = bdrv_common_block_status_above(top, base, include_base, false,
|
||||
offset, bytes, pnum, NULL, NULL,
|
||||
&depth);
|
||||
IO_CODE();
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
@ -2823,6 +2867,7 @@ bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
|
||||
BlockDriver *drv = bs->drv;
|
||||
BlockDriverState *child_bs = bdrv_primary_bs(bs);
|
||||
int ret;
|
||||
IO_CODE();
|
||||
|
||||
ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
|
||||
if (ret < 0) {
|
||||
@ -2854,6 +2899,7 @@ bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
|
||||
BlockDriver *drv = bs->drv;
|
||||
BlockDriverState *child_bs = bdrv_primary_bs(bs);
|
||||
int ret;
|
||||
IO_CODE();
|
||||
|
||||
ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
|
||||
if (ret < 0) {
|
||||
@ -2884,6 +2930,7 @@ int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
|
||||
{
|
||||
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
|
||||
int ret = bdrv_writev_vmstate(bs, &qiov, pos);
|
||||
IO_CODE();
|
||||
|
||||
return ret < 0 ? ret : size;
|
||||
}
|
||||
@ -2893,6 +2940,7 @@ int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
|
||||
{
|
||||
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
|
||||
int ret = bdrv_readv_vmstate(bs, &qiov, pos);
|
||||
IO_CODE();
|
||||
|
||||
return ret < 0 ? ret : size;
|
||||
}
|
||||
@ -2902,6 +2950,7 @@ int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
|
||||
|
||||
void bdrv_aio_cancel(BlockAIOCB *acb)
|
||||
{
|
||||
IO_CODE();
|
||||
qemu_aio_ref(acb);
|
||||
bdrv_aio_cancel_async(acb);
|
||||
while (acb->refcnt > 1) {
|
||||
@ -2926,6 +2975,7 @@ void bdrv_aio_cancel(BlockAIOCB *acb)
|
||||
* In either case the completion callback must be called. */
|
||||
void bdrv_aio_cancel_async(BlockAIOCB *acb)
|
||||
{
|
||||
IO_CODE();
|
||||
if (acb->aiocb_info->cancel_async) {
|
||||
acb->aiocb_info->cancel_async(acb);
|
||||
}
|
||||
@ -2940,6 +2990,7 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
|
||||
BdrvChild *child;
|
||||
int current_gen;
|
||||
int ret = 0;
|
||||
IO_CODE();
|
||||
|
||||
bdrv_inc_in_flight(bs);
|
||||
|
||||
@ -3065,6 +3116,7 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
|
||||
int64_t max_pdiscard;
|
||||
int head, tail, align;
|
||||
BlockDriverState *bs = child->bs;
|
||||
IO_CODE();
|
||||
|
||||
if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
|
||||
return -ENOMEDIUM;
|
||||
@ -3183,6 +3235,7 @@ int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
|
||||
.coroutine = qemu_coroutine_self(),
|
||||
};
|
||||
BlockAIOCB *acb;
|
||||
IO_CODE();
|
||||
|
||||
bdrv_inc_in_flight(bs);
|
||||
if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
|
||||
@ -3207,17 +3260,20 @@ out:
|
||||
|
||||
void *qemu_blockalign(BlockDriverState *bs, size_t size)
|
||||
{
|
||||
IO_CODE();
|
||||
return qemu_memalign(bdrv_opt_mem_align(bs), size);
|
||||
}
|
||||
|
||||
void *qemu_blockalign0(BlockDriverState *bs, size_t size)
|
||||
{
|
||||
IO_CODE();
|
||||
return memset(qemu_blockalign(bs, size), 0, size);
|
||||
}
|
||||
|
||||
void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
|
||||
{
|
||||
size_t align = bdrv_opt_mem_align(bs);
|
||||
IO_CODE();
|
||||
|
||||
/* Ensure that NULL is never returned on success */
|
||||
assert(align > 0);
|
||||
@ -3231,6 +3287,7 @@ void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
|
||||
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
|
||||
{
|
||||
void *mem = qemu_try_blockalign(bs, size);
|
||||
IO_CODE();
|
||||
|
||||
if (mem) {
|
||||
memset(mem, 0, size);
|
||||
@ -3246,6 +3303,7 @@ bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
|
||||
{
|
||||
int i;
|
||||
size_t alignment = bdrv_min_mem_align(bs);
|
||||
IO_CODE();
|
||||
|
||||
for (i = 0; i < qiov->niov; i++) {
|
||||
if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
|
||||
@ -3262,6 +3320,7 @@ bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
|
||||
void bdrv_io_plug(BlockDriverState *bs)
|
||||
{
|
||||
BdrvChild *child;
|
||||
IO_CODE();
|
||||
|
||||
QLIST_FOREACH(child, &bs->children, next) {
|
||||
bdrv_io_plug(child->bs);
|
||||
@ -3278,6 +3337,7 @@ void bdrv_io_plug(BlockDriverState *bs)
|
||||
void bdrv_io_unplug(BlockDriverState *bs)
|
||||
{
|
||||
BdrvChild *child;
|
||||
IO_CODE();
|
||||
|
||||
assert(bs->io_plugged);
|
||||
if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
|
||||
@ -3296,6 +3356,7 @@ void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
|
||||
{
|
||||
BdrvChild *child;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
if (bs->drv && bs->drv->bdrv_register_buf) {
|
||||
bs->drv->bdrv_register_buf(bs, host, size);
|
||||
}
|
||||
@ -3308,6 +3369,7 @@ void bdrv_unregister_buf(BlockDriverState *bs, void *host)
|
||||
{
|
||||
BdrvChild *child;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
if (bs->drv && bs->drv->bdrv_unregister_buf) {
|
||||
bs->drv->bdrv_unregister_buf(bs, host);
|
||||
}
|
||||
@ -3328,6 +3390,8 @@ static int coroutine_fn bdrv_co_copy_range_internal(
|
||||
/* TODO We can support BDRV_REQ_NO_FALLBACK here */
|
||||
assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
|
||||
assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
|
||||
assert(!(read_flags & BDRV_REQ_NO_WAIT));
|
||||
assert(!(write_flags & BDRV_REQ_NO_WAIT));
|
||||
|
||||
if (!dst || !dst->bs || !bdrv_is_inserted(dst->bs)) {
|
||||
return -ENOMEDIUM;
|
||||
@ -3402,6 +3466,7 @@ int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
|
||||
BdrvRequestFlags read_flags,
|
||||
BdrvRequestFlags write_flags)
|
||||
{
|
||||
IO_CODE();
|
||||
trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
|
||||
read_flags, write_flags);
|
||||
return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
|
||||
@ -3418,6 +3483,7 @@ int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
|
||||
BdrvRequestFlags read_flags,
|
||||
BdrvRequestFlags write_flags)
|
||||
{
|
||||
IO_CODE();
|
||||
trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
|
||||
read_flags, write_flags);
|
||||
return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
|
||||
@ -3429,6 +3495,7 @@ int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
|
||||
int64_t bytes, BdrvRequestFlags read_flags,
|
||||
BdrvRequestFlags write_flags)
|
||||
{
|
||||
IO_CODE();
|
||||
return bdrv_co_copy_range_from(src, src_offset,
|
||||
dst, dst_offset,
|
||||
bytes, read_flags, write_flags);
|
||||
@ -3461,7 +3528,7 @@ int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
|
||||
BdrvTrackedRequest req;
|
||||
int64_t old_size, new_bytes;
|
||||
int ret;
|
||||
|
||||
IO_CODE();
|
||||
|
||||
/* if bs->drv == NULL, bs is closed, so there's nothing to do here */
|
||||
if (!drv) {
|
||||
@ -3579,6 +3646,7 @@ out:
|
||||
|
||||
void bdrv_cancel_in_flight(BlockDriverState *bs)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
if (!bs || !bs->drv) {
|
||||
return;
|
||||
}
|
||||
@ -3587,3 +3655,75 @@
bs->drv->bdrv_cancel_in_flight(bs);
}
}

int coroutine_fn
bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset)
{
BlockDriverState *bs = child->bs;
BlockDriver *drv = bs->drv;
int ret;
IO_CODE();

if (!drv) {
return -ENOMEDIUM;
}

if (!drv->bdrv_co_preadv_snapshot) {
return -ENOTSUP;
}

bdrv_inc_in_flight(bs);
ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
bdrv_dec_in_flight(bs);

return ret;
}

int coroutine_fn
bdrv_co_snapshot_block_status(BlockDriverState *bs,
bool want_zero, int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map,
BlockDriverState **file)
{
BlockDriver *drv = bs->drv;
int ret;
IO_CODE();

if (!drv) {
return -ENOMEDIUM;
}

if (!drv->bdrv_co_snapshot_block_status) {
return -ENOTSUP;
}

bdrv_inc_in_flight(bs);
ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes,
pnum, map, file);
bdrv_dec_in_flight(bs);

return ret;
}

int coroutine_fn
bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
BlockDriver *drv = bs->drv;
int ret;
IO_CODE();

if (!drv) {
return -ENOMEDIUM;
}

if (!drv->bdrv_co_pdiscard_snapshot) {
return -ENOTSUP;
}

bdrv_inc_in_flight(bs);
ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
bdrv_dec_in_flight(bs);

return ret;
}
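Note (not part of the commit): the wrappers above dispatch to optional driver hooks and report -ENOTSUP when a driver does not implement the snapshot API; a hedged caller fragment, variable names illustrative:

/* Illustrative only: a snapshot-access user probing driver support. */
int ret = bdrv_co_pdiscard_snapshot(bs, offset, bytes);
if (ret == -ENOTSUP) {
    /* The driver below does not implement the snapshot-access API. */
    return ret;
}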
@ -10,7 +10,6 @@
|
||||
*/
|
||||
#include "qemu/osdep.h"
|
||||
#include <liburing.h>
|
||||
#include "qemu-common.h"
|
||||
#include "block/aio.h"
|
||||
#include "qemu/queue.h"
|
||||
#include "block/block.h"
|
||||
|
@ -28,7 +28,7 @@
|
||||
#include <poll.h>
|
||||
#include <math.h>
|
||||
#include <arpa/inet.h>
|
||||
#include "qemu-common.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
#include "qemu/config-file.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/bitops.h"
|
||||
|
@ -32,7 +32,9 @@ block_ss.add(files(
|
||||
'qcow2.c',
|
||||
'quorum.c',
|
||||
'raw-format.c',
|
||||
'reqlist.c',
|
||||
'snapshot.c',
|
||||
'snapshot-access.c',
|
||||
'throttle-groups.c',
|
||||
'throttle.c',
|
||||
'vhdx-endian.c',
|
||||
@ -131,8 +133,11 @@ block_ss.add(module_block_h)
|
||||
wrapper_py = find_program('../scripts/block-coroutine-wrapper.py')
|
||||
block_gen_c = custom_target('block-gen.c',
|
||||
output: 'block-gen.c',
|
||||
input: files('../include/block/block.h',
|
||||
'coroutines.h'),
|
||||
input: files(
|
||||
'../include/block/block-io.h',
|
||||
'../include/block/block-global-state.h',
|
||||
'coroutines.h'
|
||||
),
|
||||
command: [wrapper_py, '@OUTPUT@', '@INPUT@'])
|
||||
block_ss.add(block_gen_c)
|
||||
|
||||
|
@@ -23,6 +23,7 @@
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#include "qemu/memalign.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 Mb */
@@ -1864,6 +1865,8 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
    bool is_none_mode;
    BlockDriverState *base;

    GLOBAL_STATE_CODE();

    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
@@ -1889,6 +1892,8 @@ BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
    bool base_read_only;
    BlockJob *job;

    GLOBAL_STATE_CODE();

    base_read_only = bdrv_is_read_only(base);

    if (base_read_only) {
@@ -56,6 +56,8 @@ BdrvDirtyBitmap *block_dirty_bitmap_lookup(const char *node,
    BlockDriverState *bs;
    BdrvDirtyBitmap *bitmap;

    GLOBAL_STATE_CODE();

    if (!node) {
        error_setg(errp, "Node cannot be NULL");
        return NULL;
@@ -155,6 +157,8 @@ BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name,
    BdrvDirtyBitmap *bitmap;
    AioContext *aio_context;

    GLOBAL_STATE_CODE();

    bitmap = block_dirty_bitmap_lookup(node, name, &bs, errp);
    if (!bitmap || !bs) {
        return NULL;
@@ -253,13 +257,14 @@ void qmp_block_dirty_bitmap_disable(const char *node, const char *name,
}

BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target,
                                          BlockDirtyBitmapMergeSourceList *bms,
                                          BlockDirtyBitmapOrStrList *bms,
                                          HBitmap **backup, Error **errp)
{
    BlockDriverState *bs;
    BdrvDirtyBitmap *dst, *src, *anon;
    BlockDirtyBitmapMergeSourceList *lst;
    Error *local_err = NULL;
    BlockDirtyBitmapOrStrList *lst;

    GLOBAL_STATE_CODE();

    dst = block_dirty_bitmap_lookup(node, target, &bs, errp);
    if (!dst) {
@@ -297,9 +302,7 @@ BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target,
        abort();
    }

    bdrv_merge_dirty_bitmap(anon, src, NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    if (!bdrv_merge_dirty_bitmap(anon, src, NULL, errp)) {
        dst = NULL;
        goto out;
    }
@@ -314,7 +317,7 @@ BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target,
}

void qmp_block_dirty_bitmap_merge(const char *node, const char *target,
                                  BlockDirtyBitmapMergeSourceList *bitmaps,
                                  BlockDirtyBitmapOrStrList *bitmaps,
                                  Error **errp)
{
    block_dirty_bitmap_merge(node, target, bitmaps, NULL, errp);
block/nbd.c
@@ -35,7 +35,6 @@
#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"

#include "qapi/qapi-visit-sockets.h"
#include "qapi/qmp/qstring.h"
@@ -58,7 +57,6 @@ typedef struct {
    Coroutine *coroutine;
    uint64_t offset; /* original offset of the request */
    bool receiving; /* sleeping in the yield in nbd_receive_replies */
    bool reply_possible; /* reply header not yet received */
} NBDClientRequest;

typedef enum NBDClientState {
@@ -72,27 +70,39 @@ typedef struct BDRVNBDState {
    QIOChannel *ioc; /* The current I/O channel */
    NBDExportInfo info;

    CoMutex send_mutex;
    CoQueue free_sema;

    CoMutex receive_mutex;
    int in_flight;
    /*
     * Protects state, free_sema, in_flight, requests[].coroutine,
     * reconnect_delay_timer.
     */
    QemuMutex requests_lock;
    NBDClientState state;

    CoQueue free_sema;
    int in_flight;
    NBDClientRequest requests[MAX_NBD_REQUESTS];
    QEMUTimer *reconnect_delay_timer;

    /* Protects sending data on the socket. */
    CoMutex send_mutex;

    /*
     * Protects receiving reply headers from the socket, as well as the
     * fields reply and requests[].receiving
     */
    CoMutex receive_mutex;
    NBDReply reply;

    QEMUTimer *open_timer;

    NBDClientRequest requests[MAX_NBD_REQUESTS];
    NBDReply reply;
    BlockDriverState *bs;

    /* Connection parameters */
    uint32_t reconnect_delay;
    uint32_t open_timeout;
    SocketAddress *saddr;
    char *export, *tlscredsid;
    char *export;
    char *tlscredsid;
    QCryptoTLSCreds *tlscreds;
    const char *hostname;
    char *tlshostname;
    char *x_dirty_bitmap;
    bool alloc_depth;

||||
@ -121,16 +131,14 @@ static void nbd_clear_bdrvstate(BlockDriverState *bs)
|
||||
s->export = NULL;
|
||||
g_free(s->tlscredsid);
|
||||
s->tlscredsid = NULL;
|
||||
g_free(s->tlshostname);
|
||||
s->tlshostname = NULL;
|
||||
g_free(s->x_dirty_bitmap);
|
||||
s->x_dirty_bitmap = NULL;
|
||||
}
|
||||
|
||||
static bool nbd_client_connected(BDRVNBDState *s)
|
||||
{
|
||||
return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTED;
|
||||
}
|
||||
|
||||
static bool nbd_recv_coroutine_wake_one(NBDClientRequest *req)
|
||||
/* Called with s->receive_mutex taken. */
|
||||
static bool coroutine_fn nbd_recv_coroutine_wake_one(NBDClientRequest *req)
|
||||
{
|
||||
if (req->receiving) {
|
||||
req->receiving = false;
|
||||
@ -141,33 +149,39 @@ static bool nbd_recv_coroutine_wake_one(NBDClientRequest *req)
|
||||
return false;
|
||||
}
|
||||
|
||||
static void nbd_recv_coroutines_wake(BDRVNBDState *s, bool all)
|
||||
static void coroutine_fn nbd_recv_coroutines_wake(BDRVNBDState *s)
|
||||
{
|
||||
int i;
|
||||
|
||||
QEMU_LOCK_GUARD(&s->receive_mutex);
|
||||
for (i = 0; i < MAX_NBD_REQUESTS; i++) {
|
||||
if (nbd_recv_coroutine_wake_one(&s->requests[i]) && !all) {
|
||||
if (nbd_recv_coroutine_wake_one(&s->requests[i])) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void nbd_channel_error(BDRVNBDState *s, int ret)
|
||||
/* Called with s->requests_lock held. */
|
||||
static void coroutine_fn nbd_channel_error_locked(BDRVNBDState *s, int ret)
|
||||
{
|
||||
if (nbd_client_connected(s)) {
|
||||
if (s->state == NBD_CLIENT_CONNECTED) {
|
||||
qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
|
||||
}
|
||||
|
||||
if (ret == -EIO) {
|
||||
if (nbd_client_connected(s)) {
|
||||
if (s->state == NBD_CLIENT_CONNECTED) {
|
||||
s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
|
||||
NBD_CLIENT_CONNECTING_NOWAIT;
|
||||
}
|
||||
} else {
|
||||
s->state = NBD_CLIENT_QUIT;
|
||||
}
|
||||
}
|
||||
|
||||
nbd_recv_coroutines_wake(s, true);
|
||||
static void coroutine_fn nbd_channel_error(BDRVNBDState *s, int ret)
|
||||
{
|
||||
QEMU_LOCK_GUARD(&s->requests_lock);
|
||||
nbd_channel_error_locked(s, ret);
|
||||
}
|
||||
|
||||
static void reconnect_delay_timer_del(BDRVNBDState *s)
|
||||
@ -182,23 +196,18 @@ static void reconnect_delay_timer_cb(void *opaque)
|
||||
{
|
||||
BDRVNBDState *s = opaque;
|
||||
|
||||
if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
|
||||
s->state = NBD_CLIENT_CONNECTING_NOWAIT;
|
||||
nbd_co_establish_connection_cancel(s->conn);
|
||||
while (qemu_co_enter_next(&s->free_sema, NULL)) {
|
||||
/* Resume all queued requests */
|
||||
}
|
||||
}
|
||||
|
||||
reconnect_delay_timer_del(s);
|
||||
WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
|
||||
if (s->state != NBD_CLIENT_CONNECTING_WAIT) {
|
||||
return;
|
||||
}
|
||||
s->state = NBD_CLIENT_CONNECTING_NOWAIT;
|
||||
}
|
||||
nbd_co_establish_connection_cancel(s->conn);
|
||||
}
|
||||
|
||||
static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
|
||||
{
|
||||
if (qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTING_WAIT) {
|
||||
return;
|
||||
}
|
||||
|
||||
assert(!s->reconnect_delay_timer);
|
||||
s->reconnect_delay_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
|
||||
QEMU_CLOCK_REALTIME,
|
||||
@ -221,7 +230,9 @@ static void nbd_teardown_connection(BlockDriverState *bs)
|
||||
s->ioc = NULL;
|
||||
}
|
||||
|
||||
WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
|
||||
s->state = NBD_CLIENT_QUIT;
|
||||
}
|
||||
}
|
||||
|
||||
static void open_timer_del(BDRVNBDState *s)
|
||||
@ -250,16 +261,13 @@ static void open_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
|
||||
timer_mod(s->open_timer, expire_time_ns);
|
||||
}
|
||||
|
||||
static bool nbd_client_connecting(BDRVNBDState *s)
|
||||
static bool nbd_client_will_reconnect(BDRVNBDState *s)
|
||||
{
|
||||
NBDClientState state = qatomic_load_acquire(&s->state);
|
||||
return state == NBD_CLIENT_CONNECTING_WAIT ||
|
||||
state == NBD_CLIENT_CONNECTING_NOWAIT;
|
||||
}
|
||||
|
||||
static bool nbd_client_connecting_wait(BDRVNBDState *s)
|
||||
{
|
||||
return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT;
|
||||
/*
|
||||
* Called only after a socket error, so this is not performance sensitive.
|
||||
*/
|
||||
QEMU_LOCK_GUARD(&s->requests_lock);
|
||||
return s->state == NBD_CLIENT_CONNECTING_WAIT;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -308,11 +316,11 @@ static int nbd_handle_updated_info(BlockDriverState *bs, Error **errp)
|
||||
}
|
||||
|
||||
int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
|
||||
Error **errp)
|
||||
bool blocking, Error **errp)
|
||||
{
|
||||
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
|
||||
int ret;
|
||||
bool blocking = nbd_client_connecting_wait(s);
|
||||
IO_CODE();
|
||||
|
||||
assert(!s->ioc);
|
||||
|
||||
@ -346,34 +354,42 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
|
||||
qio_channel_attach_aio_context(s->ioc, bdrv_get_aio_context(bs));
|
||||
|
||||
/* successfully connected */
|
||||
WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
|
||||
s->state = NBD_CLIENT_CONNECTED;
|
||||
qemu_co_queue_restart_all(&s->free_sema);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* called under s->send_mutex */
|
||||
/* Called with s->requests_lock held. */
|
||||
static bool nbd_client_connecting(BDRVNBDState *s)
|
||||
{
|
||||
return s->state == NBD_CLIENT_CONNECTING_WAIT ||
|
||||
s->state == NBD_CLIENT_CONNECTING_NOWAIT;
|
||||
}
|
||||
|
||||
/* Called with s->requests_lock taken. */
|
||||
static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s)
|
||||
{
|
||||
assert(nbd_client_connecting(s));
|
||||
assert(s->in_flight == 0);
|
||||
|
||||
if (nbd_client_connecting_wait(s) && s->reconnect_delay &&
|
||||
!s->reconnect_delay_timer)
|
||||
{
|
||||
/*
|
||||
* It's first reconnect attempt after switching to
|
||||
* NBD_CLIENT_CONNECTING_WAIT
|
||||
*/
|
||||
reconnect_delay_timer_init(s,
|
||||
qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
|
||||
s->reconnect_delay * NANOSECONDS_PER_SECOND);
|
||||
}
|
||||
bool blocking = s->state == NBD_CLIENT_CONNECTING_WAIT;
|
||||
|
||||
/*
|
||||
* Now we are sure that nobody is accessing the channel, and no one will
|
||||
* try until we set the state to CONNECTED.
|
||||
*/
|
||||
assert(nbd_client_connecting(s));
|
||||
assert(s->in_flight == 1);
|
||||
|
||||
if (blocking && !s->reconnect_delay_timer) {
|
||||
/*
|
||||
* It's the first reconnect attempt after switching to
|
||||
* NBD_CLIENT_CONNECTING_WAIT
|
||||
*/
|
||||
g_assert(s->reconnect_delay);
|
||||
reconnect_delay_timer_init(s,
|
||||
qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
|
||||
s->reconnect_delay * NANOSECONDS_PER_SECOND);
|
||||
}
|
||||
|
||||
/* Finalize previous connection if any */
|
||||
if (s->ioc) {
|
||||
@ -384,7 +400,9 @@ static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s)
|
||||
s->ioc = NULL;
|
||||
}
|
||||
|
||||
nbd_co_do_establish_connection(s->bs, NULL);
|
||||
qemu_mutex_unlock(&s->requests_lock);
|
||||
nbd_co_do_establish_connection(s->bs, blocking, NULL);
|
||||
qemu_mutex_lock(&s->requests_lock);
|
||||
|
||||
/*
|
||||
* The reconnect attempt is done (maybe successfully, maybe not), so
|
||||
@ -406,10 +424,6 @@ static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!nbd_client_connected(s)) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (s->reply.handle != 0) {
|
||||
/*
|
||||
* Some other request is being handled now. It should already be
|
||||
@ -424,11 +438,10 @@ static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
|
||||
|
||||
qemu_coroutine_yield();
|
||||
/*
|
||||
* We may be woken for 3 reasons:
|
||||
* We may be woken for 2 reasons:
|
||||
* 1. From this function, executing in parallel coroutine, when our
|
||||
* handle is received.
|
||||
* 2. From nbd_channel_error(), when connection is lost.
|
||||
* 3. From nbd_co_receive_one_chunk(), when previous request is
|
||||
* 2. From nbd_co_receive_one_chunk(), when previous request is
|
||||
* finished and s->reply.handle set to 0.
|
||||
* Anyway, it's OK to lock the mutex and go to the next iteration.
|
||||
*/
|
||||
@ -450,44 +463,43 @@ static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
|
||||
nbd_channel_error(s, -EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
|
||||
if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].coroutine) {
|
||||
nbd_channel_error(s, -EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (s->reply.handle == handle) {
|
||||
/* We are done */
|
||||
return 0;
|
||||
}
|
||||
ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
|
||||
if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].reply_possible) {
|
||||
nbd_channel_error(s, -EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
nbd_recv_coroutine_wake_one(&s->requests[ind2]);
|
||||
}
|
||||
}
|
||||
|
||||
static int nbd_co_send_request(BlockDriverState *bs,
|
||||
static int coroutine_fn nbd_co_send_request(BlockDriverState *bs,
|
||||
NBDRequest *request,
|
||||
QEMUIOVector *qiov)
|
||||
{
|
||||
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
|
||||
int rc, i = -1;
|
||||
|
||||
qemu_co_mutex_lock(&s->send_mutex);
|
||||
|
||||
qemu_mutex_lock(&s->requests_lock);
|
||||
while (s->in_flight == MAX_NBD_REQUESTS ||
|
||||
(!nbd_client_connected(s) && s->in_flight > 0))
|
||||
{
|
||||
qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
|
||||
}
|
||||
|
||||
if (nbd_client_connecting(s)) {
|
||||
nbd_reconnect_attempt(s);
|
||||
}
|
||||
|
||||
if (!nbd_client_connected(s)) {
|
||||
rc = -EIO;
|
||||
goto err;
|
||||
(s->state != NBD_CLIENT_CONNECTED && s->in_flight > 0)) {
|
||||
qemu_co_queue_wait(&s->free_sema, &s->requests_lock);
|
||||
}
|
||||
|
||||
s->in_flight++;
|
||||
if (s->state != NBD_CLIENT_CONNECTED) {
|
||||
if (nbd_client_connecting(s)) {
|
||||
nbd_reconnect_attempt(s);
|
||||
qemu_co_queue_restart_all(&s->free_sema);
|
||||
}
|
||||
if (s->state != NBD_CLIENT_CONNECTED) {
|
||||
rc = -EIO;
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < MAX_NBD_REQUESTS; i++) {
|
||||
if (s->requests[i].coroutine == NULL) {
|
||||
@ -495,14 +507,13 @@ static int nbd_co_send_request(BlockDriverState *bs,
|
||||
}
|
||||
}
|
||||
|
||||
g_assert(qemu_in_coroutine());
|
||||
assert(i < MAX_NBD_REQUESTS);
|
||||
|
||||
s->requests[i].coroutine = qemu_coroutine_self();
|
||||
s->requests[i].offset = request->from;
|
||||
s->requests[i].receiving = false;
|
||||
s->requests[i].reply_possible = true;
|
||||
qemu_mutex_unlock(&s->requests_lock);
|
||||
|
||||
qemu_co_mutex_lock(&s->send_mutex);
|
||||
request->handle = INDEX_TO_HANDLE(s, i);
|
||||
|
||||
assert(s->ioc);
|
||||
@ -510,7 +521,7 @@ static int nbd_co_send_request(BlockDriverState *bs,
|
||||
if (qiov) {
|
||||
qio_channel_set_cork(s->ioc, true);
|
||||
rc = nbd_send_request(s->ioc, request);
|
||||
if (nbd_client_connected(s) && rc >= 0) {
|
||||
if (rc >= 0) {
|
||||
if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
|
||||
NULL) < 0) {
|
||||
rc = -EIO;
|
||||
@ -522,17 +533,19 @@ static int nbd_co_send_request(BlockDriverState *bs,
|
||||
} else {
|
||||
rc = nbd_send_request(s->ioc, request);
|
||||
}
|
||||
qemu_co_mutex_unlock(&s->send_mutex);
|
||||
|
||||
err:
|
||||
if (rc < 0) {
|
||||
nbd_channel_error(s, rc);
|
||||
qemu_mutex_lock(&s->requests_lock);
|
||||
err:
|
||||
nbd_channel_error_locked(s, rc);
|
||||
if (i != -1) {
|
||||
s->requests[i].coroutine = NULL;
|
||||
}
|
||||
s->in_flight--;
|
||||
qemu_co_queue_next(&s->free_sema);
|
||||
qemu_mutex_unlock(&s->requests_lock);
|
||||
}
|
||||
}
|
||||
qemu_co_mutex_unlock(&s->send_mutex);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -719,8 +732,8 @@ static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nbd_co_receive_offset_data_payload(BDRVNBDState *s,
|
||||
uint64_t orig_offset,
|
||||
static int coroutine_fn
|
||||
nbd_co_receive_offset_data_payload(BDRVNBDState *s, uint64_t orig_offset,
|
||||
QEMUIOVector *qiov, Error **errp)
|
||||
{
|
||||
QEMUIOVector sub_qiov;
|
||||
@ -827,8 +840,8 @@ static coroutine_fn int nbd_co_do_receive_one_chunk(
|
||||
}
|
||||
*request_ret = 0;
|
||||
|
||||
nbd_receive_replies(s, handle);
|
||||
if (!nbd_client_connected(s)) {
|
||||
ret = nbd_receive_replies(s, handle);
|
||||
if (ret < 0) {
|
||||
error_setg(errp, "Connection closed");
|
||||
return -EIO;
|
||||
}
|
||||
@ -920,7 +933,7 @@ static coroutine_fn int nbd_co_receive_one_chunk(
|
||||
}
|
||||
s->reply.handle = 0;
|
||||
|
||||
nbd_recv_coroutines_wake(s, false);
|
||||
nbd_recv_coroutines_wake(s);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -980,11 +993,6 @@ static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s,
|
||||
NBDReply local_reply;
|
||||
NBDStructuredReplyChunk *chunk;
|
||||
Error *local_err = NULL;
|
||||
if (!nbd_client_connected(s)) {
|
||||
error_setg(&local_err, "Connection closed");
|
||||
nbd_iter_channel_error(iter, -EIO, &local_err);
|
||||
goto break_loop;
|
||||
}
|
||||
|
||||
if (iter->done) {
|
||||
/* Previous iteration was last. */
|
||||
@ -1005,7 +1013,7 @@ static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s,
|
||||
}
|
||||
|
||||
/* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
|
||||
if (nbd_reply_is_simple(reply) || !nbd_client_connected(s)) {
|
||||
if (nbd_reply_is_simple(reply) || iter->ret < 0) {
|
||||
goto break_loop;
|
||||
}
|
||||
|
||||
@ -1027,17 +1035,16 @@ static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s,
|
||||
return true;
|
||||
|
||||
break_loop:
|
||||
qemu_mutex_lock(&s->requests_lock);
|
||||
s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;
|
||||
|
||||
qemu_co_mutex_lock(&s->send_mutex);
|
||||
s->in_flight--;
|
||||
qemu_co_queue_next(&s->free_sema);
|
||||
qemu_co_mutex_unlock(&s->send_mutex);
|
||||
qemu_mutex_unlock(&s->requests_lock);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle,
|
||||
static int coroutine_fn nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle,
|
||||
int *request_ret, Error **errp)
|
||||
{
|
||||
NBDReplyChunkIter iter;
|
||||
@ -1051,7 +1058,7 @@ static int nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle,
|
||||
return iter.ret;
|
||||
}
|
||||
|
||||
static int nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle,
|
||||
static int coroutine_fn nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle,
|
||||
uint64_t offset, QEMUIOVector *qiov,
|
||||
int *request_ret, Error **errp)
|
||||
{
|
||||
@ -1103,7 +1110,7 @@ static int nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle,
|
||||
return iter.ret;
|
||||
}
|
||||
|
||||
static int nbd_co_receive_blockstatus_reply(BDRVNBDState *s,
|
||||
static int coroutine_fn nbd_co_receive_blockstatus_reply(BDRVNBDState *s,
|
||||
uint64_t handle, uint64_t length,
|
||||
NBDExtent *extent,
|
||||
int *request_ret, Error **errp)
|
||||
@ -1163,7 +1170,7 @@ static int nbd_co_receive_blockstatus_reply(BDRVNBDState *s,
|
||||
return iter.ret;
|
||||
}
|
||||
|
||||
static int nbd_co_request(BlockDriverState *bs, NBDRequest *request,
|
||||
static int coroutine_fn nbd_co_request(BlockDriverState *bs, NBDRequest *request,
|
||||
QEMUIOVector *write_qiov)
|
||||
{
|
||||
int ret, request_ret;
|
||||
@ -1195,12 +1202,12 @@ static int nbd_co_request(BlockDriverState *bs, NBDRequest *request,
|
||||
error_free(local_err);
|
||||
local_err = NULL;
|
||||
}
|
||||
} while (ret < 0 && nbd_client_connecting_wait(s));
|
||||
} while (ret < 0 && nbd_client_will_reconnect(s));
|
||||
|
||||
return ret ? ret : request_ret;
|
||||
}
|
||||
|
||||
static int nbd_client_co_preadv(BlockDriverState *bs, int64_t offset,
|
||||
static int coroutine_fn nbd_client_co_preadv(BlockDriverState *bs, int64_t offset,
|
||||
int64_t bytes, QEMUIOVector *qiov,
|
||||
BdrvRequestFlags flags)
|
||||
{
|
||||
@ -1254,12 +1261,12 @@ static int nbd_client_co_preadv(BlockDriverState *bs, int64_t offset,
|
||||
error_free(local_err);
|
||||
local_err = NULL;
|
||||
}
|
||||
} while (ret < 0 && nbd_client_connecting_wait(s));
|
||||
} while (ret < 0 && nbd_client_will_reconnect(s));
|
||||
|
||||
return ret ? ret : request_ret;
|
||||
}
|
||||
|
||||
static int nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset,
|
||||
static int coroutine_fn nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset,
|
||||
int64_t bytes, QEMUIOVector *qiov,
|
||||
BdrvRequestFlags flags)
|
||||
{
|
||||
@ -1284,7 +1291,7 @@ static int nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset,
|
||||
return nbd_co_request(bs, &request, qiov);
|
||||
}
|
||||
|
||||
static int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
|
||||
static int coroutine_fn nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
|
||||
int64_t bytes, BdrvRequestFlags flags)
|
||||
{
|
||||
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
|
||||
@ -1319,7 +1326,7 @@ static int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
|
||||
return nbd_co_request(bs, &request, NULL);
|
||||
}
|
||||
|
||||
static int nbd_client_co_flush(BlockDriverState *bs)
|
||||
static int coroutine_fn nbd_client_co_flush(BlockDriverState *bs)
|
||||
{
|
||||
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
|
||||
NBDRequest request = { .type = NBD_CMD_FLUSH };
|
||||
@ -1334,7 +1341,7 @@ static int nbd_client_co_flush(BlockDriverState *bs)
|
||||
return nbd_co_request(bs, &request, NULL);
|
||||
}
|
||||
|
||||
static int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset,
|
||||
static int coroutine_fn nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset,
|
||||
int64_t bytes)
|
||||
{
|
||||
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
|
||||
@ -1412,7 +1419,7 @@ static int coroutine_fn nbd_client_co_block_status(
|
||||
error_free(local_err);
|
||||
local_err = NULL;
|
||||
}
|
||||
} while (ret < 0 && nbd_client_connecting_wait(s));
|
||||
} while (ret < 0 && nbd_client_will_reconnect(s));
|
||||
|
||||
if (ret < 0 || request_ret < 0) {
|
||||
return ret ? ret : request_ret;
|
||||
@ -1444,8 +1451,9 @@ static void nbd_yank(void *opaque)
|
||||
BlockDriverState *bs = opaque;
|
||||
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
|
||||
|
||||
qatomic_store_release(&s->state, NBD_CLIENT_QUIT);
|
||||
QEMU_LOCK_GUARD(&s->requests_lock);
|
||||
qio_channel_shutdown(QIO_CHANNEL(s->ioc), QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
|
||||
s->state = NBD_CLIENT_QUIT;
|
||||
}
|
||||
|
||||
static void nbd_client_close(BlockDriverState *bs)
|
||||
@ -1764,6 +1772,11 @@ static QemuOptsList nbd_runtime_opts = {
|
||||
.type = QEMU_OPT_STRING,
|
||||
.help = "ID of the TLS credentials to use",
|
||||
},
|
||||
{
|
||||
.name = "tls-hostname",
|
||||
.type = QEMU_OPT_STRING,
|
||||
.help = "Override hostname for validating TLS x509 certificate",
|
||||
},
|
||||
{
|
||||
.name = "x-dirty-bitmap",
|
||||
.type = QEMU_OPT_STRING,
|
||||
@ -1830,12 +1843,11 @@ static int nbd_process_options(BlockDriverState *bs, QDict *options,
|
||||
goto error;
|
||||
}
|
||||
|
||||
/* TODO SOCKET_ADDRESS_KIND_FD where fd has AF_INET or AF_INET6 */
|
||||
if (s->saddr->type != SOCKET_ADDRESS_TYPE_INET) {
|
||||
error_setg(errp, "TLS only supported over IP sockets");
|
||||
goto error;
|
||||
s->tlshostname = g_strdup(qemu_opt_get(opts, "tls-hostname"));
|
||||
if (!s->tlshostname &&
|
||||
s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
|
||||
s->tlshostname = g_strdup(s->saddr->u.inet.host);
|
||||
}
|
||||
s->hostname = s->saddr->u.inet.host;
|
||||
}
|
||||
|
||||
s->x_dirty_bitmap = g_strdup(qemu_opt_get(opts, "x-dirty-bitmap"));
|
||||
@ -1861,8 +1873,9 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
|
||||
|
||||
s->bs = bs;
|
||||
qemu_co_mutex_init(&s->send_mutex);
|
||||
qemu_mutex_init(&s->requests_lock);
|
||||
qemu_co_queue_init(&s->free_sema);
|
||||
qemu_co_mutex_init(&s->send_mutex);
|
||||
qemu_co_mutex_init(&s->receive_mutex);
|
||||
|
||||
if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) {
|
||||
@ -1875,7 +1888,8 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
}
|
||||
|
||||
s->conn = nbd_client_connection_new(s->saddr, true, s->export,
|
||||
s->x_dirty_bitmap, s->tlscreds);
|
||||
s->x_dirty_bitmap, s->tlscreds,
|
||||
s->tlshostname);
|
||||
|
||||
if (s->open_timeout) {
|
||||
nbd_client_connection_enable_retry(s->conn);
|
||||
@ -1884,7 +1898,7 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
}
|
||||
|
||||
s->state = NBD_CLIENT_CONNECTING_WAIT;
|
||||
ret = nbd_do_establish_connection(bs, errp);
|
||||
ret = nbd_do_establish_connection(bs, true, errp);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
@ -1906,7 +1920,7 @@ fail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int nbd_co_flush(BlockDriverState *bs)
|
||||
static int coroutine_fn nbd_co_flush(BlockDriverState *bs)
|
||||
{
|
||||
return nbd_client_co_flush(bs);
|
||||
}
|
||||
@ -2036,6 +2050,7 @@ static const char *const nbd_strong_runtime_opts[] = {
|
||||
"port",
|
||||
"export",
|
||||
"tls-creds",
|
||||
"tls-hostname",
|
||||
"server.",
|
||||
|
||||
NULL
|
||||
@ -2047,10 +2062,11 @@ static void nbd_cancel_in_flight(BlockDriverState *bs)
|
||||
|
||||
reconnect_delay_timer_del(s);
|
||||
|
||||
qemu_mutex_lock(&s->requests_lock);
|
||||
if (s->state == NBD_CLIENT_CONNECTING_WAIT) {
|
||||
s->state = NBD_CLIENT_CONNECTING_NOWAIT;
|
||||
qemu_co_queue_restart_all(&s->free_sema);
|
||||
}
|
||||
qemu_mutex_unlock(&s->requests_lock);
|
||||
|
||||
nbd_co_establish_connection_cancel(s->conn);
|
||||
}
|
||||
|
block/nvme.c
@ -21,6 +21,7 @@
|
||||
#include "qemu/module.h"
|
||||
#include "qemu/cutils.h"
|
||||
#include "qemu/option.h"
|
||||
#include "qemu/memalign.h"
|
||||
#include "qemu/vfio-helpers.h"
|
||||
#include "block/block_int.h"
|
||||
#include "sysemu/replay.h"
|
||||
@ -168,9 +169,9 @@ static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
|
||||
size_t bytes;
|
||||
int r;
|
||||
|
||||
bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size);
|
||||
bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size());
|
||||
q->head = q->tail = 0;
|
||||
q->queue = qemu_try_memalign(qemu_real_host_page_size, bytes);
|
||||
q->queue = qemu_try_memalign(qemu_real_host_page_size(), bytes);
|
||||
if (!q->queue) {
|
||||
error_setg(errp, "Cannot allocate queue");
|
||||
return false;
|
||||
@ -231,8 +232,8 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
|
||||
trace_nvme_create_queue_pair(idx, q, size, aio_context,
|
||||
event_notifier_get_fd(s->irq_notifier));
|
||||
bytes = QEMU_ALIGN_UP(s->page_size * NVME_NUM_REQS,
|
||||
qemu_real_host_page_size);
|
||||
q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size, bytes);
|
||||
qemu_real_host_page_size());
|
||||
q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size(), bytes);
|
||||
if (!q->prp_list_pages) {
|
||||
error_setg(errp, "Cannot allocate PRP page list");
|
||||
goto fail;
|
||||
@ -532,9 +533,9 @@ static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
|
||||
.opcode = NVME_ADM_CMD_IDENTIFY,
|
||||
.cdw10 = cpu_to_le32(0x1),
|
||||
};
|
||||
size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size);
|
||||
size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size());
|
||||
|
||||
id = qemu_try_memalign(qemu_real_host_page_size, id_size);
|
||||
id = qemu_try_memalign(qemu_real_host_page_size(), id_size);
|
||||
if (!id) {
|
||||
error_setg(errp, "Cannot allocate buffer for identify response");
|
||||
goto out;
|
||||
@ -1047,7 +1048,7 @@ static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
|
||||
bool retry = true;
|
||||
uint64_t iova;
|
||||
size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len,
|
||||
qemu_real_host_page_size);
|
||||
qemu_real_host_page_size());
|
||||
try_map:
|
||||
r = qemu_vfio_dma_map(s->vfio,
|
||||
qiov->iov[i].iov_base,
|
||||
@ -1223,8 +1224,8 @@ static inline bool nvme_qiov_aligned(BlockDriverState *bs,
|
||||
|
||||
for (i = 0; i < qiov->niov; ++i) {
|
||||
if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base,
|
||||
qemu_real_host_page_size) ||
|
||||
!QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size)) {
|
||||
qemu_real_host_page_size()) ||
|
||||
!QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size())) {
|
||||
trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
|
||||
qiov->iov[i].iov_len, s->page_size);
|
||||
return false;
|
||||
@ -1240,7 +1241,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
|
||||
int r;
|
||||
QEMU_AUTO_VFREE uint8_t *buf = NULL;
|
||||
QEMUIOVector local_qiov;
|
||||
size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size);
|
||||
size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size());
|
||||
assert(QEMU_IS_ALIGNED(offset, s->page_size));
|
||||
assert(QEMU_IS_ALIGNED(bytes, s->page_size));
|
||||
assert(bytes <= s->max_transfer);
|
||||
@ -1250,7 +1251,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
|
||||
}
|
||||
s->stats.unaligned_accesses++;
|
||||
trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
|
||||
buf = qemu_try_memalign(qemu_real_host_page_size, len);
|
||||
buf = qemu_try_memalign(qemu_real_host_page_size(), len);
|
||||
|
||||
if (!buf) {
|
||||
return -ENOMEM;
|
||||
|
@ -29,6 +29,7 @@
|
||||
#include "parallels.h"
|
||||
#include "crypto/hash.h"
|
||||
#include "qemu/uuid.h"
|
||||
#include "qemu/memalign.h"
|
||||
|
||||
#define PARALLELS_FORMAT_EXTENSION_MAGIC 0xAB234CEF23DCEA87ULL
|
||||
|
||||
@ -260,7 +261,7 @@ static int parallels_parse_format_extension(BlockDriverState *bs,
|
||||
break;
|
||||
|
||||
default:
|
||||
error_setg(errp, "Unknown feature: 0x%" PRIu64, fh.magic);
|
||||
error_setg(errp, "Unknown feature: 0x%" PRIx64, fh.magic);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
|
@ -41,6 +41,7 @@
|
||||
#include "qapi/qapi-visit-block-core.h"
|
||||
#include "qemu/bswap.h"
|
||||
#include "qemu/bitmap.h"
|
||||
#include "qemu/memalign.h"
|
||||
#include "migration/blocker.h"
|
||||
#include "parallels.h"
|
||||
|
||||
@ -869,11 +870,11 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
}
|
||||
}
|
||||
|
||||
s->bat_dirty_block = 4 * qemu_real_host_page_size;
|
||||
s->bat_dirty_block = 4 * qemu_real_host_page_size();
|
||||
s->bat_dirty_bmap =
|
||||
bitmap_new(DIV_ROUND_UP(s->header_size, s->bat_dirty_block));
|
||||
|
||||
/* Disable migration until bdrv_invalidate_cache method is added */
|
||||
/* Disable migration until bdrv_activate method is added */
|
||||
error_setg(&s->migration_blocker, "The Parallels format used by node '%s' "
|
||||
"does not support live migration",
|
||||
bdrv_get_device_or_node_name(bs));
|
||||
|
@ -276,6 +276,10 @@ static bool coroutine_fn handle_write(BlockDriverState *bs, int64_t offset,
|
||||
int64_t end = offset + bytes;
|
||||
int64_t prealloc_start, prealloc_end;
|
||||
int ret;
|
||||
uint32_t file_align = bs->file->bs->bl.request_alignment;
|
||||
uint32_t prealloc_align = MAX(s->opts.prealloc_align, file_align);
|
||||
|
||||
assert(QEMU_IS_ALIGNED(prealloc_align, file_align));
|
||||
|
||||
if (!has_prealloc_perms(bs)) {
|
||||
/* We don't have state neither should try to recover it */
|
||||
@ -320,9 +324,14 @@ static bool coroutine_fn handle_write(BlockDriverState *bs, int64_t offset,
|
||||
|
||||
/* Now we want new preallocation, as request writes beyond s->file_end. */
|
||||
|
||||
prealloc_start = want_merge_zero ? MIN(offset, s->file_end) : s->file_end;
|
||||
prealloc_end = QEMU_ALIGN_UP(end + s->opts.prealloc_size,
|
||||
s->opts.prealloc_align);
|
||||
prealloc_start = QEMU_ALIGN_UP(
|
||||
want_merge_zero ? MIN(offset, s->file_end) : s->file_end,
|
||||
file_align);
|
||||
prealloc_end = QEMU_ALIGN_UP(
|
||||
MAX(prealloc_start, end) + s->opts.prealloc_size,
|
||||
prealloc_align);
|
||||
|
||||
want_merge_zero = want_merge_zero && (prealloc_start <= offset);
|
||||
|
||||
ret = bdrv_co_pwrite_zeroes(
|
||||
bs->file, prealloc_start, prealloc_end - prealloc_start,
|
||||
|
@ -318,6 +318,7 @@ void qmp_blockdev_change_medium(bool has_device, const char *device,
|
||||
bool has_id, const char *id,
|
||||
const char *filename,
|
||||
bool has_format, const char *format,
|
||||
bool has_force, bool force,
|
||||
bool has_read_only,
|
||||
BlockdevChangeReadOnlyMode read_only,
|
||||
Error **errp)
|
||||
@ -380,7 +381,7 @@ void qmp_blockdev_change_medium(bool has_device, const char *device,
|
||||
|
||||
rc = do_open_tray(has_device ? device : NULL,
|
||||
has_id ? id : NULL,
|
||||
false, &err);
|
||||
force, &err);
|
||||
if (rc && rc != -ENOSYS) {
|
||||
error_propagate(errp, err);
|
||||
goto fail;
|
||||
|
@ -32,6 +32,7 @@
|
||||
#include "qemu/option.h"
|
||||
#include "qemu/bswap.h"
|
||||
#include "qemu/cutils.h"
|
||||
#include "qemu/memalign.h"
|
||||
#include <zlib.h>
|
||||
#include "qapi/qmp/qdict.h"
|
||||
#include "qapi/qmp/qstring.h"
|
||||
|
@ -23,6 +23,7 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/memalign.h"
|
||||
#include "qcow2.h"
|
||||
#include "trace.h"
|
||||
|
||||
@ -74,7 +75,7 @@ static void qcow2_cache_table_release(Qcow2Cache *c, int i, int num_tables)
|
||||
/* Using MADV_DONTNEED to discard memory is a Linux-specific feature */
|
||||
#ifdef CONFIG_LINUX
|
||||
void *t = qcow2_cache_get_table_addr(c, i);
|
||||
int align = qemu_real_host_page_size;
|
||||
int align = qemu_real_host_page_size();
|
||||
size_t mem_size = (size_t) c->table_size * num_tables;
|
||||
size_t offset = QEMU_ALIGN_UP((uintptr_t) t, align) - (uintptr_t) t;
|
||||
size_t length = QEMU_ALIGN_DOWN(mem_size - offset, align);
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include "qapi/error.h"
|
||||
#include "qcow2.h"
|
||||
#include "qemu/bswap.h"
|
||||
#include "qemu/memalign.h"
|
||||
#include "trace.h"
|
||||
|
||||
int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
|
||||
|
@ -28,11 +28,14 @@
|
||||
#include "qemu/range.h"
|
||||
#include "qemu/bswap.h"
|
||||
#include "qemu/cutils.h"
|
||||
#include "qemu/memalign.h"
|
||||
#include "trace.h"
|
||||
|
||||
static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size,
|
||||
uint64_t max);
|
||||
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
|
||||
|
||||
G_GNUC_WARN_UNUSED_RESULT
|
||||
static int update_refcount(BlockDriverState *bs,
|
||||
int64_t offset, int64_t length, uint64_t addend,
|
||||
bool decrease, enum qcow2_discard_type type);
|
||||
|
||||
@ -802,7 +805,7 @@ found:
|
||||
/* XXX: cache several refcount block clusters ? */
|
||||
/* @addend is the absolute value of the addend; if @decrease is set, @addend
|
||||
* will be subtracted from the current refcount, otherwise it will be added */
|
||||
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
|
||||
static int update_refcount(BlockDriverState *bs,
|
||||
int64_t offset,
|
||||
int64_t length,
|
||||
uint64_t addend,
|
||||
@ -2434,10 +2437,165 @@ static int64_t alloc_clusters_imrt(BlockDriverState *bs,
|
||||
return cluster << s->cluster_bits;
|
||||
}
|
||||
|
||||
/*
|
||||
* Helper function for rebuild_refcount_structure().
|
||||
*
|
||||
* Scan the range of clusters [first_cluster, end_cluster) for allocated
|
||||
* clusters and write all corresponding refblocks to disk. The refblock
|
||||
* and allocation data is taken from the in-memory refcount table
|
||||
* *refcount_table[] (of size *nb_clusters), which is basically one big
|
||||
* (unlimited size) refblock for the whole image.
|
||||
*
|
||||
* For these refblocks, clusters are allocated using said in-memory
|
||||
* refcount table. Care is taken that these allocations are reflected
|
||||
* in the refblocks written to disk.
|
||||
*
|
||||
* The refblocks' offsets are written into a reftable, which is
|
||||
* *on_disk_reftable_ptr[] (of size *on_disk_reftable_entries_ptr). If
|
||||
* that reftable is of insufficient size, it will be resized to fit.
|
||||
* This reftable is not written to disk.
|
||||
*
|
||||
* (If *on_disk_reftable_ptr is not NULL, the entries within are assumed
|
||||
* to point to existing valid refblocks that do not need to be allocated
|
||||
* again.)
|
||||
*
|
||||
* Return whether the on-disk reftable array was resized (true/false),
|
||||
* or -errno on error.
|
||||
*/
|
||||
static int rebuild_refcounts_write_refblocks(
|
||||
BlockDriverState *bs, void **refcount_table, int64_t *nb_clusters,
|
||||
int64_t first_cluster, int64_t end_cluster,
|
||||
uint64_t **on_disk_reftable_ptr, uint32_t *on_disk_reftable_entries_ptr,
|
||||
Error **errp
|
||||
)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int64_t cluster;
|
||||
int64_t refblock_offset, refblock_start, refblock_index;
|
||||
int64_t first_free_cluster = 0;
|
||||
uint64_t *on_disk_reftable = *on_disk_reftable_ptr;
|
||||
uint32_t on_disk_reftable_entries = *on_disk_reftable_entries_ptr;
|
||||
void *on_disk_refblock;
|
||||
bool reftable_grown = false;
|
||||
int ret;
|
||||
|
||||
for (cluster = first_cluster; cluster < end_cluster; cluster++) {
|
||||
/* Check all clusters to find refblocks that contain non-zero entries */
|
||||
if (!s->get_refcount(*refcount_table, cluster)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* This cluster is allocated, so we need to create a refblock
|
||||
* for it. The data we will write to disk is just the
|
||||
* respective slice from *refcount_table, so it will contain
|
||||
* accurate refcounts for all clusters belonging to this
|
||||
* refblock. After we have written it, we will therefore skip
|
||||
* all remaining clusters in this refblock.
|
||||
*/
|
||||
|
||||
refblock_index = cluster >> s->refcount_block_bits;
|
||||
refblock_start = refblock_index << s->refcount_block_bits;
|
||||
|
||||
if (on_disk_reftable_entries > refblock_index &&
|
||||
on_disk_reftable[refblock_index])
|
||||
{
|
||||
/*
|
||||
* We can get here after a `goto write_refblocks`: We have a
|
||||
* reftable from a previous run, and the refblock is already
|
||||
* allocated. No need to allocate it again.
|
||||
*/
|
||||
refblock_offset = on_disk_reftable[refblock_index];
|
||||
} else {
|
||||
int64_t refblock_cluster_index;
|
||||
|
||||
/* Don't allocate a cluster in a refblock already written to disk */
|
||||
if (first_free_cluster < refblock_start) {
|
||||
first_free_cluster = refblock_start;
|
||||
}
|
||||
refblock_offset = alloc_clusters_imrt(bs, 1, refcount_table,
|
||||
nb_clusters,
|
||||
&first_free_cluster);
|
||||
if (refblock_offset < 0) {
|
||||
error_setg_errno(errp, -refblock_offset,
|
||||
"ERROR allocating refblock");
|
||||
return refblock_offset;
|
||||
}
|
||||
|
||||
refblock_cluster_index = refblock_offset / s->cluster_size;
|
||||
if (refblock_cluster_index >= end_cluster) {
|
||||
/*
|
||||
* We must write the refblock that holds this refblock's
|
||||
* refcount
|
||||
*/
|
||||
end_cluster = refblock_cluster_index + 1;
|
||||
}
|
||||
|
||||
if (on_disk_reftable_entries <= refblock_index) {
|
||||
on_disk_reftable_entries =
|
||||
ROUND_UP((refblock_index + 1) * REFTABLE_ENTRY_SIZE,
|
||||
s->cluster_size) / REFTABLE_ENTRY_SIZE;
|
||||
on_disk_reftable =
|
||||
g_try_realloc(on_disk_reftable,
|
||||
on_disk_reftable_entries *
|
||||
REFTABLE_ENTRY_SIZE);
|
||||
if (!on_disk_reftable) {
|
||||
error_setg(errp, "ERROR allocating reftable memory");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memset(on_disk_reftable + *on_disk_reftable_entries_ptr, 0,
|
||||
(on_disk_reftable_entries -
|
||||
*on_disk_reftable_entries_ptr) *
|
||||
REFTABLE_ENTRY_SIZE);
|
||||
|
||||
*on_disk_reftable_ptr = on_disk_reftable;
|
||||
*on_disk_reftable_entries_ptr = on_disk_reftable_entries;
|
||||
|
||||
reftable_grown = true;
|
||||
} else {
|
||||
assert(on_disk_reftable);
|
||||
}
|
||||
on_disk_reftable[refblock_index] = refblock_offset;
|
||||
}
|
||||
|
||||
/* Refblock is allocated, write it to disk */
|
||||
|
||||
ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset,
|
||||
s->cluster_size, false);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "ERROR writing refblock");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* The refblock is simply a slice of *refcount_table.
|
||||
* Note that the size of *refcount_table is always aligned to
|
||||
* whole clusters, so the write operation will not result in
|
||||
* out-of-bounds accesses.
|
||||
*/
|
||||
on_disk_refblock = (void *)((char *) *refcount_table +
|
||||
refblock_index * s->cluster_size);
|
||||
|
||||
ret = bdrv_pwrite(bs->file, refblock_offset, on_disk_refblock,
|
||||
s->cluster_size);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "ERROR writing refblock");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* This refblock is done, skip to its end */
|
||||
cluster = refblock_start + s->refcount_block_size - 1;
|
||||
}
|
||||
|
||||
return reftable_grown;
|
||||
}
|
||||
|
||||
/*
|
||||
* Creates a new refcount structure based solely on the in-memory information
|
||||
* given through *refcount_table. All necessary allocations will be reflected
|
||||
* in that array.
|
||||
* given through *refcount_table (this in-memory information is basically just
|
||||
* the concatenation of all refblocks). All necessary allocations will be
|
||||
* reflected in that array.
|
||||
*
|
||||
* On success, the old refcount structure is leaked (it will be covered by the
|
||||
* new refcount structure).
|
||||
@ -2445,15 +2603,18 @@ static int64_t alloc_clusters_imrt(BlockDriverState *bs,
|
||||
static int rebuild_refcount_structure(BlockDriverState *bs,
|
||||
BdrvCheckResult *res,
|
||||
void **refcount_table,
|
||||
int64_t *nb_clusters)
|
||||
int64_t *nb_clusters,
|
||||
Error **errp)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int64_t first_free_cluster = 0, reftable_offset = -1, cluster = 0;
|
||||
int64_t refblock_offset, refblock_start, refblock_index;
|
||||
uint32_t reftable_size = 0;
|
||||
int64_t reftable_offset = -1;
|
||||
int64_t reftable_length = 0;
|
||||
int64_t reftable_clusters;
|
||||
int64_t refblock_index;
|
||||
uint32_t on_disk_reftable_entries = 0;
|
||||
uint64_t *on_disk_reftable = NULL;
|
||||
void *on_disk_refblock;
|
||||
int ret = 0;
|
||||
int reftable_size_changed = 0;
|
||||
struct {
|
||||
uint64_t reftable_offset;
|
||||
uint32_t reftable_clusters;
|
||||
@ -2461,162 +2622,145 @@ static int rebuild_refcount_structure(BlockDriverState *bs,
|
||||
|
||||
qcow2_cache_empty(bs, s->refcount_block_cache);
|
||||
|
||||
write_refblocks:
|
||||
for (; cluster < *nb_clusters; cluster++) {
|
||||
if (!s->get_refcount(*refcount_table, cluster)) {
|
||||
continue;
|
||||
}
|
||||
/*
|
||||
* For each refblock containing entries, we try to allocate a
|
||||
* cluster (in the in-memory refcount table) and write its offset
|
||||
* into on_disk_reftable[]. We then write the whole refblock to
|
||||
* disk (as a slice of the in-memory refcount table).
|
||||
* This is done by rebuild_refcounts_write_refblocks().
|
||||
*
|
||||
* Once we have scanned all clusters, we try to find space for the
|
||||
* reftable. This will dirty the in-memory refcount table (i.e.
|
||||
* make it differ from the refblocks we have already written), so we
|
||||
* need to run rebuild_refcounts_write_refblocks() again for the
|
||||
* range of clusters where the reftable has been allocated.
|
||||
*
|
||||
* This second run might make the reftable grow again, in which case
|
||||
* we will need to allocate another space for it, which is why we
|
||||
* repeat all this until the reftable stops growing.
|
||||
*
|
||||
* (This loop will terminate, because with every cluster the
|
||||
* reftable grows, it can accomodate a multitude of more refcounts,
|
||||
* so that at some point this must be able to cover the reftable
|
||||
* and all refblocks describing it.)
|
||||
*
|
||||
* We then convert the reftable to big-endian and write it to disk.
|
||||
*
|
||||
* Note that we never free any reftable allocations. Doing so would
|
||||
* needlessly complicate the algorithm: The eventual second check
|
||||
* run we do will clean up all leaks we have caused.
|
||||
*/
|
||||
|
||||
refblock_index = cluster >> s->refcount_block_bits;
|
||||
refblock_start = refblock_index << s->refcount_block_bits;
|
||||
|
||||
/* Don't allocate a cluster in a refblock already written to disk */
|
||||
if (first_free_cluster < refblock_start) {
|
||||
first_free_cluster = refblock_start;
|
||||
}
|
||||
refblock_offset = alloc_clusters_imrt(bs, 1, refcount_table,
|
||||
nb_clusters, &first_free_cluster);
|
||||
if (refblock_offset < 0) {
|
||||
fprintf(stderr, "ERROR allocating refblock: %s\n",
|
||||
strerror(-refblock_offset));
|
||||
reftable_size_changed =
|
||||
rebuild_refcounts_write_refblocks(bs, refcount_table, nb_clusters,
|
||||
0, *nb_clusters,
|
||||
&on_disk_reftable,
|
||||
&on_disk_reftable_entries, errp);
|
||||
if (reftable_size_changed < 0) {
|
||||
res->check_errors++;
|
||||
ret = refblock_offset;
|
||||
ret = reftable_size_changed;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (reftable_size <= refblock_index) {
|
||||
uint32_t old_reftable_size = reftable_size;
|
||||
uint64_t *new_on_disk_reftable;
|
||||
/*
|
||||
* There was no reftable before, so rebuild_refcounts_write_refblocks()
|
||||
* must have increased its size (from 0 to something).
|
||||
*/
|
||||
assert(reftable_size_changed);
|
||||
|
||||
reftable_size = ROUND_UP((refblock_index + 1) * REFTABLE_ENTRY_SIZE,
|
||||
s->cluster_size) / REFTABLE_ENTRY_SIZE;
|
||||
new_on_disk_reftable = g_try_realloc(on_disk_reftable,
|
||||
reftable_size *
|
||||
REFTABLE_ENTRY_SIZE);
|
||||
if (!new_on_disk_reftable) {
|
||||
do {
|
||||
int64_t reftable_start_cluster, reftable_end_cluster;
|
||||
int64_t first_free_cluster = 0;
|
||||
|
||||
reftable_length = on_disk_reftable_entries * REFTABLE_ENTRY_SIZE;
|
||||
reftable_clusters = size_to_clusters(s, reftable_length);
|
||||
|
||||
reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
|
||||
refcount_table, nb_clusters,
|
||||
&first_free_cluster);
|
||||
if (reftable_offset < 0) {
|
||||
error_setg_errno(errp, -reftable_offset,
|
||||
"ERROR allocating reftable");
|
||||
res->check_errors++;
|
||||
ret = -ENOMEM;
|
||||
ret = reftable_offset;
|
||||
goto fail;
|
||||
}
|
||||
on_disk_reftable = new_on_disk_reftable;
|
||||
|
||||
memset(on_disk_reftable + old_reftable_size, 0,
|
||||
(reftable_size - old_reftable_size) * REFTABLE_ENTRY_SIZE);
|
||||
|
||||
/* The offset we have for the reftable is now no longer valid;
|
||||
* this will leak that range, but we can easily fix that by running
|
||||
* a leak-fixing check after this rebuild operation */
|
||||
reftable_offset = -1;
|
||||
} else {
|
||||
assert(on_disk_reftable);
|
||||
/*
|
||||
* We need to update the affected refblocks, so re-run the
|
||||
* write_refblocks loop for the reftable's range of clusters.
|
||||
*/
|
||||
assert(offset_into_cluster(s, reftable_offset) == 0);
|
||||
reftable_start_cluster = reftable_offset / s->cluster_size;
|
||||
reftable_end_cluster = reftable_start_cluster + reftable_clusters;
|
||||
reftable_size_changed =
|
||||
rebuild_refcounts_write_refblocks(bs, refcount_table, nb_clusters,
|
||||
reftable_start_cluster,
|
||||
reftable_end_cluster,
|
||||
&on_disk_reftable,
|
||||
&on_disk_reftable_entries, errp);
|
||||
if (reftable_size_changed < 0) {
|
||||
res->check_errors++;
|
||||
ret = reftable_size_changed;
|
||||
goto fail;
|
||||
}
|
||||
on_disk_reftable[refblock_index] = refblock_offset;
|
||||
|
||||
/* If this is apparently the last refblock (for now), try to squeeze the
|
||||
* reftable in */
|
||||
if (refblock_index == (*nb_clusters - 1) >> s->refcount_block_bits &&
|
||||
reftable_offset < 0)
|
||||
/*
|
||||
* If the reftable size has changed, we will need to find a new
|
||||
* allocation, repeating the loop.
|
||||
*/
|
||||
} while (reftable_size_changed);
|
||||
|
||||
/* The above loop must have run at least once */
|
||||
assert(reftable_offset >= 0);
|
||||
|
||||
/*
|
||||
* All allocations are done, all refblocks are written, convert the
|
||||
* reftable to big-endian and write it to disk.
|
||||
*/
|
||||
|
||||
for (refblock_index = 0; refblock_index < on_disk_reftable_entries;
|
||||
refblock_index++)
|
||||
{
|
||||
uint64_t reftable_clusters = size_to_clusters(s, reftable_size *
|
||||
REFTABLE_ENTRY_SIZE);
|
||||
reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
|
||||
refcount_table, nb_clusters,
|
||||
&first_free_cluster);
|
||||
if (reftable_offset < 0) {
|
||||
fprintf(stderr, "ERROR allocating reftable: %s\n",
|
||||
strerror(-reftable_offset));
|
||||
res->check_errors++;
|
||||
ret = reftable_offset;
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset,
|
||||
s->cluster_size, false);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* The size of *refcount_table is always cluster-aligned, therefore the
|
||||
* write operation will not overflow */
|
||||
on_disk_refblock = (void *)((char *) *refcount_table +
|
||||
refblock_index * s->cluster_size);
|
||||
|
||||
ret = bdrv_pwrite(bs->file, refblock_offset, on_disk_refblock,
|
||||
s->cluster_size);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Go to the end of this refblock */
|
||||
cluster = refblock_start + s->refcount_block_size - 1;
|
||||
}
|
||||
|
||||
if (reftable_offset < 0) {
|
||||
uint64_t post_refblock_start, reftable_clusters;
|
||||
|
||||
post_refblock_start = ROUND_UP(*nb_clusters, s->refcount_block_size);
|
||||
reftable_clusters =
|
||||
size_to_clusters(s, reftable_size * REFTABLE_ENTRY_SIZE);
|
||||
/* Not pretty but simple */
|
||||
if (first_free_cluster < post_refblock_start) {
|
||||
first_free_cluster = post_refblock_start;
|
||||
}
|
||||
reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
|
||||
refcount_table, nb_clusters,
|
||||
&first_free_cluster);
|
||||
if (reftable_offset < 0) {
|
||||
fprintf(stderr, "ERROR allocating reftable: %s\n",
|
||||
strerror(-reftable_offset));
|
||||
res->check_errors++;
|
||||
ret = reftable_offset;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
goto write_refblocks;
|
||||
}
|
||||
|
||||
for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
|
||||
cpu_to_be64s(&on_disk_reftable[refblock_index]);
|
||||
}
|
||||
|
||||
ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset,
|
||||
reftable_size * REFTABLE_ENTRY_SIZE,
|
||||
ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset, reftable_length,
|
||||
false);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
|
||||
error_setg_errno(errp, -ret, "ERROR writing reftable");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
assert(reftable_size < INT_MAX / REFTABLE_ENTRY_SIZE);
|
||||
assert(reftable_length < INT_MAX);
|
||||
ret = bdrv_pwrite(bs->file, reftable_offset, on_disk_reftable,
|
||||
reftable_size * REFTABLE_ENTRY_SIZE);
|
||||
reftable_length);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
|
||||
error_setg_errno(errp, -ret, "ERROR writing reftable");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Enter new reftable into the image header */
|
||||
reftable_offset_and_clusters.reftable_offset = cpu_to_be64(reftable_offset);
|
||||
reftable_offset_and_clusters.reftable_clusters =
|
||||
cpu_to_be32(size_to_clusters(s, reftable_size * REFTABLE_ENTRY_SIZE));
|
||||
cpu_to_be32(reftable_clusters);
|
||||
ret = bdrv_pwrite_sync(bs->file,
|
||||
offsetof(QCowHeader, refcount_table_offset),
|
||||
&reftable_offset_and_clusters,
|
||||
sizeof(reftable_offset_and_clusters));
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "ERROR setting reftable: %s\n", strerror(-ret));
|
||||
error_setg_errno(errp, -ret, "ERROR setting reftable");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
|
||||
for (refblock_index = 0; refblock_index < on_disk_reftable_entries;
|
||||
refblock_index++)
|
||||
{
|
||||
be64_to_cpus(&on_disk_reftable[refblock_index]);
|
||||
}
|
||||
s->refcount_table = on_disk_reftable;
|
||||
s->refcount_table_offset = reftable_offset;
|
||||
s->refcount_table_size = reftable_size;
|
||||
s->refcount_table_size = on_disk_reftable_entries;
|
||||
update_max_refcount_table_index(s);
|
||||
|
||||
return 0;
|
||||
@ -2673,11 +2817,13 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
|
||||
if (rebuild && (fix & BDRV_FIX_ERRORS)) {
|
||||
BdrvCheckResult old_res = *res;
|
||||
int fresh_leaks = 0;
|
||||
Error *local_err = NULL;
|
||||
|
||||
fprintf(stderr, "Rebuilding refcount structure\n");
|
||||
ret = rebuild_refcount_structure(bs, res, &refcount_table,
|
||||
&nb_clusters);
|
||||
&nb_clusters, &local_err);
|
||||
if (ret < 0) {
|
||||
error_report_err(local_err);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
|
@ -29,6 +29,7 @@
|
||||
#include "qemu/bswap.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/cutils.h"
|
||||
#include "qemu/memalign.h"
|
||||
|
||||
static void qcow2_free_single_snapshot(BlockDriverState *bs, int i)
|
||||
{
|
||||
|
@ -38,6 +38,7 @@
|
||||
#include "qemu/option_int.h"
|
||||
#include "qemu/cutils.h"
|
||||
#include "qemu/bswap.h"
|
||||
#include "qemu/memalign.h"
|
||||
#include "qapi/qobject-input-visitor.h"
|
||||
#include "qapi/qapi-visit-block-core.h"
|
||||
#include "crypto.h"
|
||||
|
@ -838,7 +838,7 @@ int qcow2_update_header(BlockDriverState *bs);
|
||||
|
||||
void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
|
||||
int64_t size, const char *message_format, ...)
|
||||
GCC_FMT_ATTR(5, 6);
|
||||
G_GNUC_PRINTF(5, 6);
|
||||
|
||||
int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
|
||||
uint64_t entries, size_t entry_len,
|
||||
|
@ -51,6 +51,7 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/memalign.h"
|
||||
#include "trace.h"
|
||||
#include "qed.h"
|
||||
|
||||
|
@ -17,6 +17,7 @@
|
||||
#include "qemu/sockets.h" /* for EINPROGRESS on Windows */
|
||||
#include "qed.h"
|
||||
#include "qemu/bswap.h"
|
||||
#include "qemu/memalign.h"
|
||||
|
||||
/* Called with table_lock held. */
|
||||
static int coroutine_fn qed_read_table(BDRVQEDState *s, uint64_t offset,
|
||||
|
@ -20,6 +20,7 @@
|
||||
#include "qemu/main-loop.h"
|
||||
#include "qemu/module.h"
|
||||
#include "qemu/option.h"
|
||||
#include "qemu/memalign.h"
|
||||
#include "trace.h"
|
||||
#include "qed.h"
|
||||
#include "sysemu/block-backend.h"
|
||||
|
Some files were not shown because too many files have changed in this diff.