Merge
commit 5bd9736af0
.gitignore (vendored): 3 changes
@@ -3,10 +3,13 @@
/GNUmakefile
/build/
/.cache/
/.vscode/
*.pyc
.sdk
.stgit-*
.git-submodule-status
.clang-format
.gdb_history
cscope.*
tags
TAGS
@@ -327,6 +327,7 @@ clang-user:
  extends: .native_build_job_template
  needs:
    job: amd64-debian-user-cross-container
  timeout: 70m
  variables:
    IMAGE: debian-all-test-cross
    CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system
@ -11,6 +11,6 @@ MAKE='/usr/local/bin/gmake'
|
||||
NINJA='/usr/local/bin/ninja'
|
||||
PACKAGING_COMMAND='pkg'
|
||||
PIP3='/usr/local/bin/pip-3.8'
|
||||
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd'
|
||||
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd'
|
||||
PYPI_PKGS=''
|
||||
PYTHON='/usr/local/bin/python3'
|
||||
|
@ -11,6 +11,6 @@ MAKE='/usr/local/bin/gmake'
|
||||
NINJA='/usr/local/bin/ninja'
|
||||
PACKAGING_COMMAND='pkg'
|
||||
PIP3='/usr/local/bin/pip-3.8'
|
||||
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd'
|
||||
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd'
|
||||
PYPI_PKGS=''
|
||||
PYTHON='/usr/local/bin/python3'
|
||||
|
MAINTAINERS (80 changes)
@ -84,7 +84,6 @@ T: git https://github.com/vivier/qemu.git trivial-patches
|
||||
Architecture support
|
||||
--------------------
|
||||
S390 general architecture support
|
||||
M: Cornelia Huck <cohuck@redhat.com>
|
||||
M: Thomas Huth <thuth@redhat.com>
|
||||
S: Supported
|
||||
F: configs/devices/s390x-softmmu/default.mak
|
||||
@ -106,7 +105,6 @@ F: docs/system/target-s390x.rst
|
||||
F: docs/system/s390x/
|
||||
F: tests/migration/s390x/
|
||||
K: ^Subject:.*(?i)s390x?
|
||||
T: git https://gitlab.com/cohuck/qemu.git s390-next
|
||||
L: qemu-s390x@nongnu.org
|
||||
|
||||
MIPS general architecture support
|
||||
@ -239,16 +237,10 @@ R: Jiaxun Yang <jiaxun.yang@flygoat.com>
|
||||
R: Aleksandar Rikalo <aleksandar.rikalo@syrmia.com>
|
||||
S: Odd Fixes
|
||||
F: target/mips/
|
||||
F: disas/mips.c
|
||||
F: disas/*mips.c
|
||||
F: docs/system/cpu-models-mips.rst.inc
|
||||
F: tests/tcg/mips/
|
||||
|
||||
MIPS TCG CPUs (nanoMIPS ISA)
|
||||
M: Stefan Pejic <stefan.pejic@syrmia.com>
|
||||
S: Maintained
|
||||
F: disas/nanomips.*
|
||||
F: target/mips/tcg/*nanomips*
|
||||
|
||||
NiosII TCG CPUs
|
||||
M: Chris Wulff <crwulff@gmail.com>
|
||||
M: Marek Vasut <marex@denx.de>
|
||||
@ -305,6 +297,7 @@ F: target/rx/
|
||||
S390 TCG CPUs
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
M: David Hildenbrand <david@redhat.com>
|
||||
R: Ilya Leoshkevich <iii@linux.ibm.com>
|
||||
S: Maintained
|
||||
F: target/s390x/
|
||||
F: target/s390x/tcg
|
||||
@ -548,12 +541,14 @@ F: */*win32*
|
||||
F: include/*/*win32*
|
||||
X: qga/*win32*
|
||||
F: qemu.nsi
|
||||
F: scripts/nsis.py
|
||||
|
||||
Darwin (macOS, iOS)
|
||||
M: Philippe Mathieu-Daudé <philmd@linaro.org>
|
||||
S: Odd Fixes
|
||||
F: .gitlab-ci.d/cirrus/macos-*
|
||||
F: */*.m
|
||||
F: scripts/entitlement.sh
|
||||
|
||||
Alpha Machines
|
||||
--------------
|
||||
@ -1328,6 +1323,7 @@ F: hw/nvram/mac_nvram.c
|
||||
F: hw/input/adb*
|
||||
F: include/hw/misc/macio/
|
||||
F: include/hw/misc/mos6522.h
|
||||
F: include/hw/nvram/mac_nvram.h
|
||||
F: include/hw/ppc/mac_dbdma.h
|
||||
F: include/hw/pci-host/uninorth.h
|
||||
F: include/hw/input/adb*
|
||||
@ -1345,6 +1341,7 @@ F: hw/intc/heathrow_pic.c
|
||||
F: hw/input/adb*
|
||||
F: include/hw/intc/heathrow_pic.h
|
||||
F: include/hw/input/adb*
|
||||
F: include/hw/pci-host/grackle.h
|
||||
F: pc-bios/qemu_vga.ndrv
|
||||
|
||||
PReP
|
||||
@ -1832,6 +1829,13 @@ F: qapi/pci.json
|
||||
F: docs/pci*
|
||||
F: docs/specs/*pci*
|
||||
|
||||
PCIE DOE
|
||||
M: Huai-Cheng Kuo <hchkuo@avery-design.com.tw>
|
||||
M: Chris Browy <cbrowy@avery-design.com>
|
||||
S: Supported
|
||||
F: include/hw/pci/pcie_doe.h
|
||||
F: hw/pci/pcie_doe.c
|
||||
|
||||
ACPI/SMBIOS
|
||||
M: Michael S. Tsirkin <mst@redhat.com>
|
||||
M: Igor Mammedov <imammedo@redhat.com>
|
||||
@ -1859,6 +1863,13 @@ S: Supported
|
||||
F: hw/acpi/viot.c
|
||||
F: hw/acpi/viot.h
|
||||
|
||||
ACPI/AVOCADO/BIOSBITS
|
||||
M: Ani Sinha <ani@anisinha.ca>
|
||||
S: Supported
|
||||
F: tests/avocado/acpi-bits/*
|
||||
F: tests/avocado/acpi-bits.py
|
||||
F: docs/devel/acpi-bits.rst
|
||||
|
||||
ACPI/HEST/GHES
|
||||
R: Dongjiu Geng <gengdongjiu1@gmail.com>
|
||||
L: qemu-arm@nongnu.org
|
||||
@ -1915,7 +1926,7 @@ SSI
|
||||
M: Alistair Francis <alistair@alistair23.me>
|
||||
S: Maintained
|
||||
F: hw/ssi/*
|
||||
F: hw/block/m25p80.c
|
||||
F: hw/block/m25p80*
|
||||
F: include/hw/ssi/ssi.h
|
||||
X: hw/ssi/xilinx_*
|
||||
F: tests/qtest/m25p80-test.c
|
||||
@ -2000,6 +2011,7 @@ S: Supported
|
||||
F: hw/*/virtio*
|
||||
F: hw/virtio/Makefile.objs
|
||||
F: hw/virtio/trace-events
|
||||
F: qapi/virtio.json
|
||||
F: net/vhost-user.c
|
||||
F: include/hw/virtio/
|
||||
|
||||
@ -2509,7 +2521,10 @@ S: Supported
|
||||
F: block*
|
||||
F: block/
|
||||
F: hw/block/
|
||||
F: qapi/block*.json
|
||||
F: qapi/transaction.json
|
||||
F: include/block/
|
||||
F: include/sysemu/block-*.h
|
||||
F: qemu-img*
|
||||
F: docs/tools/qemu-img.rst
|
||||
F: qemu-io*
|
||||
@ -2582,16 +2597,6 @@ F: include/qemu/co-shared-resource.h
|
||||
T: git https://gitlab.com/jsnow/qemu.git jobs
|
||||
T: git https://gitlab.com/vsementsov/qemu.git block
|
||||
|
||||
Block QAPI, monitor, command line
|
||||
M: Markus Armbruster <armbru@redhat.com>
|
||||
S: Supported
|
||||
F: blockdev.c
|
||||
F: blockdev-hmp-cmds.c
|
||||
F: block/qapi.c
|
||||
F: qapi/block*.json
|
||||
F: qapi/transaction.json
|
||||
T: git https://repo.or.cz/qemu/armbru.git block-next
|
||||
|
||||
Compute Express Link
|
||||
M: Ben Widawsky <ben.widawsky@intel.com>
|
||||
M: Jonathan Cameron <jonathan.cameron@huawei.com>
|
||||
@ -2693,6 +2698,7 @@ F: gdbstub/*
|
||||
F: include/exec/gdbstub.h
|
||||
F: gdb-xml/
|
||||
F: tests/tcg/multiarch/gdbstub/
|
||||
F: scripts/feature_to_c.sh
|
||||
|
||||
Memory API
|
||||
M: Paolo Bonzini <pbonzini@redhat.com>
|
||||
@ -3378,7 +3384,7 @@ L: qemu-s390x@nongnu.org
|
||||
|
||||
SPARC TCG target
|
||||
S: Odd Fixes
|
||||
F: tcg/sparc/
|
||||
F: tcg/sparc64/
|
||||
F: disas/sparc.c
|
||||
|
||||
TCI TCG target
|
||||
@ -3415,6 +3421,12 @@ L: qemu-block@nongnu.org
|
||||
S: Maintained
|
||||
F: block/vdi.c
|
||||
|
||||
blkio
|
||||
M: Stefan Hajnoczi <stefanha@redhat.com>
|
||||
L: qemu-block@nongnu.org
|
||||
S: Maintained
|
||||
F: block/blkio.c
|
||||
|
||||
iSCSI
|
||||
M: Ronnie Sahlberg <ronniesahlberg@gmail.com>
|
||||
M: Paolo Bonzini <pbonzini@redhat.com>
|
||||
@ -3726,8 +3738,7 @@ Guest Test Compilation Support
|
||||
M: Alex Bennée <alex.bennee@linaro.org>
|
||||
R: Philippe Mathieu-Daudé <philmd@linaro.org>
|
||||
S: Maintained
|
||||
F: tests/tcg/Makefile
|
||||
F: tests/tcg/Makefile.include
|
||||
F: tests/tcg/Makefile.target
|
||||
|
||||
Integration Testing with the Avocado framework
|
||||
W: https://trello.com/b/6Qi1pxVn/avocado-qemu
|
||||
@ -3764,6 +3775,29 @@ F: docs/about/deprecated.rst
|
||||
|
||||
Build System
|
||||
------------
|
||||
Meson
|
||||
M: Paolo Bonzini <pbonzini@redhat.com>
|
||||
R: Marc-André Lureau <marcandre.lureau@redhat.com>
|
||||
R: Daniel P. Berrange <berrange@redhat.com>
|
||||
R: Thomas Huth <thuth@redhat.com>
|
||||
R: Philippe Mathieu-Daudé <philmd@linaro.org>
|
||||
S: Maintained
|
||||
F: meson.build
|
||||
F: meson_options.txt
|
||||
F: scripts/meson-buildoptions.*
|
||||
F: scripts/check_sparse.py
|
||||
F: scripts/symlink-install-tree.py
|
||||
|
||||
Top Level Makefile and configure
|
||||
M: Paolo Bonzini <pbonzini@redhat.com>
|
||||
R: Alex Bennée <alex.bennee@linaro.org>
|
||||
R: Thomas Huth <thuth@redhat.com>
|
||||
S: Maintained
|
||||
F: Makefile
|
||||
F: configure
|
||||
F: scripts/mtest2make.py
|
||||
F: tests/Makefile.include
|
||||
|
||||
GIT submodules
|
||||
M: Daniel P. Berrange <berrange@redhat.com>
|
||||
S: Odd Fixes
|
||||
|
@ -66,6 +66,7 @@ void accel_init_ops_interfaces(AccelClass *ac)
|
||||
{
|
||||
const char *ac_name;
|
||||
char *ops_name;
|
||||
ObjectClass *oc;
|
||||
AccelOpsClass *ops;
|
||||
|
||||
ac_name = object_class_get_name(OBJECT_CLASS(ac));
|
||||
@ -73,8 +74,13 @@ void accel_init_ops_interfaces(AccelClass *ac)
|
||||
|
||||
ops_name = g_strdup_printf("%s" ACCEL_OPS_SUFFIX, ac_name);
|
||||
ops = ACCEL_OPS_CLASS(module_object_class_by_name(ops_name));
|
||||
oc = module_object_class_by_name(ops_name);
|
||||
if (!oc) {
|
||||
error_report("fatal: could not load module for type '%s'", ops_name);
|
||||
exit(1);
|
||||
}
|
||||
g_free(ops_name);
|
||||
|
||||
ops = ACCEL_OPS_CLASS(oc);
|
||||
/*
|
||||
* all accelerators need to define ops, providing at least a mandatory
|
||||
* non-NULL create_vcpu_thread operation.
|
||||
|
@ -21,8 +21,6 @@
|
||||
static void *dummy_cpu_thread_fn(void *arg)
|
||||
{
|
||||
CPUState *cpu = arg;
|
||||
sigset_t waitset;
|
||||
int r;
|
||||
|
||||
rcu_register_thread();
|
||||
|
||||
@ -32,8 +30,13 @@ static void *dummy_cpu_thread_fn(void *arg)
|
||||
cpu->can_do_io = 1;
|
||||
current_cpu = cpu;
|
||||
|
||||
#ifndef _WIN32
|
||||
sigset_t waitset;
|
||||
int r;
|
||||
|
||||
sigemptyset(&waitset);
|
||||
sigaddset(&waitset, SIG_IPI);
|
||||
#endif
|
||||
|
||||
/* signal CPU creation */
|
||||
cpu_thread_signal_created(cpu);
|
||||
@ -41,6 +44,7 @@ static void *dummy_cpu_thread_fn(void *arg)
|
||||
|
||||
do {
|
||||
qemu_mutex_unlock_iothread();
|
||||
#ifndef _WIN32
|
||||
do {
|
||||
int sig;
|
||||
r = sigwait(&waitset, &sig);
|
||||
@ -49,6 +53,9 @@ static void *dummy_cpu_thread_fn(void *arg)
|
||||
perror("sigwait");
|
||||
exit(1);
|
||||
}
|
||||
#else
|
||||
qemu_sem_wait(&cpu->sem);
|
||||
#endif
|
||||
qemu_mutex_lock_iothread();
|
||||
qemu_wait_io_event(cpu);
|
||||
} while (!cpu->unplug);
|
||||
@ -69,4 +76,7 @@ void dummy_start_vcpu_thread(CPUState *cpu)
|
||||
cpu->cpu_index);
|
||||
qemu_thread_create(cpu->thread, thread_name, dummy_cpu_thread_fn, cpu,
|
||||
QEMU_THREAD_JOINABLE);
|
||||
#ifdef _WIN32
|
||||
qemu_sem_init(&cpu->sem, 0);
|
||||
#endif
|
||||
}
|
||||
|
@ -16,5 +16,5 @@ dummy_ss.add(files(
|
||||
'dummy-cpus.c',
|
||||
))
|
||||
|
||||
specific_ss.add_all(when: ['CONFIG_SOFTMMU', 'CONFIG_POSIX'], if_true: dummy_ss)
|
||||
specific_ss.add_all(when: ['CONFIG_SOFTMMU'], if_true: dummy_ss)
|
||||
specific_ss.add_all(when: ['CONFIG_XEN'], if_true: dummy_ss)
|
||||
|
@ -1,2 +1 @@
|
||||
qtest_module_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_POSIX'],
|
||||
if_true: files('qtest.c'))
|
||||
qtest_module_ss.add(when: ['CONFIG_SOFTMMU'], if_true: files('qtest.c'))
|
||||
|
@ -71,7 +71,7 @@ void cpu_loop_exit(CPUState *cpu)
|
||||
void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
|
||||
{
|
||||
if (pc) {
|
||||
cpu_restore_state(cpu, pc, true);
|
||||
cpu_restore_state(cpu, pc);
|
||||
}
|
||||
cpu_loop_exit(cpu);
|
||||
}
|
||||
|
@ -187,13 +187,14 @@ static bool tb_lookup_cmp(const void *p, const void *d)
|
||||
const struct tb_desc *desc = d;
|
||||
|
||||
if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
|
||||
tb->page_addr[0] == desc->page_addr0 &&
|
||||
tb_page_addr0(tb) == desc->page_addr0 &&
|
||||
tb->cs_base == desc->cs_base &&
|
||||
tb->flags == desc->flags &&
|
||||
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
|
||||
tb_cflags(tb) == desc->cflags) {
|
||||
/* check next page if needed */
|
||||
if (tb->page_addr[1] == -1) {
|
||||
tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
|
||||
if (tb_phys_page1 == -1) {
|
||||
return true;
|
||||
} else {
|
||||
tb_page_addr_t phys_page1;
|
||||
@ -210,7 +211,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
|
||||
*/
|
||||
virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
|
||||
phys_page1 = get_page_addr_code(desc->env, virt_page1);
|
||||
if (tb->page_addr[1] == phys_page1) {
|
||||
if (tb_phys_page1 == phys_page1) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@ -304,16 +305,12 @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu,
|
||||
}
|
||||
}
|
||||
|
||||
static bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
|
||||
uint32_t *cflags)
|
||||
static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
|
||||
uint32_t *cflags)
|
||||
{
|
||||
CPUBreakpoint *bp;
|
||||
bool match_page = false;
|
||||
|
||||
if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Singlestep overrides breakpoints.
|
||||
* This requirement is visible in the record-replay tests, where
|
||||
@ -374,6 +371,13 @@ static bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
|
||||
uint32_t *cflags)
|
||||
{
|
||||
return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
|
||||
check_for_breakpoints_slow(cpu, pc, cflags);
|
||||
}
|
||||
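Editor's note: the hunk above splits breakpoint checking into a slow path plus a small inline wrapper that bails out early when the breakpoint list is empty, so the common no-breakpoints case costs only one cheap test. Below is a minimal, self-contained sketch of that fast-path/slow-path pattern; all names and the match rule are illustrative, not QEMU's.

```c
#include <stdbool.h>
#include <stdio.h>

struct bp_list { int count; };          /* stand-in for a breakpoint list */

/* Rarely taken: does the expensive per-breakpoint work. */
static bool bp_check_slow(const struct bp_list *bps, unsigned long pc)
{
    printf("slow path taken for pc=%#lx\n", pc);
    return bps->count > 0 && (pc % 2 == 0);   /* placeholder match rule */
}

/* Hot path: one cheap emptiness test before calling the slow path. */
static inline bool bp_check(const struct bp_list *bps, unsigned long pc)
{
    return bps->count != 0 && bp_check_slow(bps, pc);
}

int main(void)
{
    struct bp_list none = { 0 }, some = { 1 };
    printf("%d\n", bp_check(&none, 0x1000));  /* no slow-path call */
    printf("%d\n", bp_check(&some, 0x1000));  /* slow path runs */
    return 0;
}
```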
|
||||
/**
|
||||
* helper_lookup_tb_ptr: quick check for next tb
|
||||
* @env: current cpu state
|
||||
@ -1044,7 +1048,7 @@ int cpu_exec(CPUState *cpu)
|
||||
* direct jump to a TB spanning two pages because the mapping
|
||||
* for the second page can change.
|
||||
*/
|
||||
if (tb->page_addr[1] != -1) {
|
||||
if (tb_page_addr1(tb) != -1) {
|
||||
last_tb = NULL;
|
||||
}
|
||||
#endif
|
||||
@ -1101,23 +1105,25 @@ void tcg_exec_realizefn(CPUState *cpu, Error **errp)
|
||||
cc->tcg_ops->initialize();
|
||||
tcg_target_initialized = true;
|
||||
}
|
||||
tlb_init(cpu);
|
||||
qemu_plugin_vcpu_init_hook(cpu);
|
||||
|
||||
cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1);
|
||||
tlb_init(cpu);
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
tcg_iommu_init_notifier_list(cpu);
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
/* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */
|
||||
}
|
||||
|
||||
/* undo the initializations in reverse order */
|
||||
void tcg_exec_unrealizefn(CPUState *cpu)
|
||||
{
|
||||
qemu_plugin_vcpu_exit_hook(cpu);
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
tcg_iommu_free_notifier_list(cpu);
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
|
||||
qemu_plugin_vcpu_exit_hook(cpu);
|
||||
tlb_destroy(cpu);
|
||||
g_free(cpu->tb_jmp_cache);
|
||||
}
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
|
@ -11,12 +11,103 @@
|
||||
|
||||
#include "exec/exec-all.h"
|
||||
|
||||
/*
|
||||
* Access to the various translations structures need to be serialised
|
||||
* via locks for consistency. In user-mode emulation access to the
|
||||
* memory related structures are protected with mmap_lock.
|
||||
* In !user-mode we use per-page locks.
|
||||
*/
|
||||
#ifdef CONFIG_SOFTMMU
|
||||
#define assert_memory_lock()
|
||||
#else
|
||||
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
|
||||
#endif
|
||||
|
||||
typedef struct PageDesc {
|
||||
/* list of TBs intersecting this ram page */
|
||||
uintptr_t first_tb;
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
unsigned long flags;
|
||||
void *target_data;
|
||||
#endif
|
||||
#ifdef CONFIG_SOFTMMU
|
||||
QemuSpin lock;
|
||||
#endif
|
||||
} PageDesc;
|
||||
|
||||
/* Size of the L2 (and L3, etc) page tables. */
|
||||
#define V_L2_BITS 10
|
||||
#define V_L2_SIZE (1 << V_L2_BITS)
|
||||
|
||||
/*
|
||||
* L1 Mapping properties
|
||||
*/
|
||||
extern int v_l1_size;
|
||||
extern int v_l1_shift;
|
||||
extern int v_l2_levels;
|
||||
|
||||
/*
|
||||
* The bottom level has pointers to PageDesc, and is indexed by
|
||||
* anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
|
||||
*/
|
||||
#define V_L1_MIN_BITS 4
|
||||
#define V_L1_MAX_BITS (V_L2_BITS + 3)
|
||||
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
|
||||
|
||||
extern void *l1_map[V_L1_MAX_SIZE];
|
||||
|
||||
PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc);
|
||||
|
||||
static inline PageDesc *page_find(tb_page_addr_t index)
|
||||
{
|
||||
return page_find_alloc(index, false);
|
||||
}
|
||||
|
||||
/* list iterators for lists of tagged pointers in TranslationBlock */
|
||||
#define TB_FOR_EACH_TAGGED(head, tb, n, field) \
|
||||
for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \
|
||||
tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
|
||||
tb = (TranslationBlock *)((uintptr_t)tb & ~1))
|
||||
|
||||
#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \
|
||||
TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
|
||||
|
||||
#define TB_FOR_EACH_JMP(head_tb, tb, n) \
|
||||
TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
|
||||
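Editor's note: TB_FOR_EACH_TAGGED walks an intrusive list whose links carry a one-bit tag in the pointer's least-significant bit (which link slot of the node the next hop uses). The stand-alone sketch below shows the same low-bit tagging idea with hypothetical types; it is an illustration of a tag and a pointer sharing one word, not QEMU's macro.

```c
#include <stdint.h>
#include <stdio.h>

struct node {
    int value;
    uintptr_t next[2];   /* two link slots, like tb->page_next[] */
};

/* Pack a node pointer and a 1-bit tag into a single uintptr_t. */
static uintptr_t tag_ptr(struct node *n, unsigned tag)
{
    return (uintptr_t)n | (tag & 1);
}

int main(void)
{
    struct node c = { 3, { 0, 0 } };
    struct node b = { 2, { tag_ptr(&c, 0), 0 } };  /* b -> c via slot 0 */
    struct node a = { 1, { 0, tag_ptr(&b, 0) } };  /* a -> b via slot 1 */

    uintptr_t head = tag_ptr(&a, 1);               /* head enters a at slot 1 */
    for (uintptr_t cur = head; cur & ~(uintptr_t)1; ) {
        unsigned n = cur & 1;                      /* which slot to follow */
        struct node *p = (struct node *)(cur & ~(uintptr_t)1);
        printf("value=%d (via slot %u)\n", p->value, n);
        cur = p->next[n];
    }
    return 0;
}
```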
|
||||
/* In user-mode page locks aren't used; mmap_lock is enough */
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
|
||||
static inline void page_lock(PageDesc *pd) { }
|
||||
static inline void page_unlock(PageDesc *pd) { }
|
||||
#else
|
||||
#ifdef CONFIG_DEBUG_TCG
|
||||
void do_assert_page_locked(const PageDesc *pd, const char *file, int line);
|
||||
#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
|
||||
#else
|
||||
#define assert_page_locked(pd)
|
||||
#endif
|
||||
void page_lock(PageDesc *pd);
|
||||
void page_unlock(PageDesc *pd);
|
||||
#endif
|
||||
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
|
||||
void assert_no_pages_locked(void);
|
||||
#else
|
||||
static inline void assert_no_pages_locked(void) { }
|
||||
#endif
|
||||
|
||||
TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
|
||||
target_ulong cs_base, uint32_t flags,
|
||||
int cflags);
|
||||
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
|
||||
void page_init(void);
|
||||
void tb_htable_init(void);
|
||||
void tb_reset_jump(TranslationBlock *tb, int n);
|
||||
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
|
||||
tb_page_addr_t phys_page2);
|
||||
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
|
||||
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
uintptr_t host_pc);
|
||||
|
||||
/* Return the current PC from CPU, which may be cached in TB. */
|
||||
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
|
||||
|
@ -3,6 +3,7 @@ tcg_ss.add(files(
|
||||
'tcg-all.c',
|
||||
'cpu-exec-common.c',
|
||||
'cpu-exec.c',
|
||||
'tb-maint.c',
|
||||
'tcg-runtime-gvec.c',
|
||||
'tcg-runtime.c',
|
||||
'translate-all.c',
|
||||
|
accel/tcg/tb-maint.c (new file, 704 lines)
@ -0,0 +1,704 @@
|
||||
/*
|
||||
* Translation Block Maintenance
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "exec/cputlb.h"
|
||||
#include "exec/log.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "exec/translate-all.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "tcg/tcg.h"
|
||||
#include "tb-hash.h"
|
||||
#include "tb-context.h"
|
||||
#include "internal.h"
|
||||
|
||||
|
||||
static bool tb_cmp(const void *ap, const void *bp)
|
||||
{
|
||||
const TranslationBlock *a = ap;
|
||||
const TranslationBlock *b = bp;
|
||||
|
||||
return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) &&
|
||||
a->cs_base == b->cs_base &&
|
||||
a->flags == b->flags &&
|
||||
(tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
|
||||
a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
|
||||
tb_page_addr0(a) == tb_page_addr0(b) &&
|
||||
tb_page_addr1(a) == tb_page_addr1(b));
|
||||
}
|
||||
|
||||
void tb_htable_init(void)
|
||||
{
|
||||
unsigned int mode = QHT_MODE_AUTO_RESIZE;
|
||||
|
||||
qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
|
||||
}
|
||||
|
||||
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
|
||||
static void page_flush_tb_1(int level, void **lp)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (*lp == NULL) {
|
||||
return;
|
||||
}
|
||||
if (level == 0) {
|
||||
PageDesc *pd = *lp;
|
||||
|
||||
for (i = 0; i < V_L2_SIZE; ++i) {
|
||||
page_lock(&pd[i]);
|
||||
pd[i].first_tb = (uintptr_t)NULL;
|
||||
page_unlock(&pd[i]);
|
||||
}
|
||||
} else {
|
||||
void **pp = *lp;
|
||||
|
||||
for (i = 0; i < V_L2_SIZE; ++i) {
|
||||
page_flush_tb_1(level - 1, pp + i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void page_flush_tb(void)
|
||||
{
|
||||
int i, l1_sz = v_l1_size;
|
||||
|
||||
for (i = 0; i < l1_sz; i++) {
|
||||
page_flush_tb_1(v_l2_levels, l1_map + i);
|
||||
}
|
||||
}
|
||||
|
||||
/* flush all the translation blocks */
|
||||
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
|
||||
{
|
||||
bool did_flush = false;
|
||||
|
||||
mmap_lock();
|
||||
/* If it has already been done on request of another CPU, just retry. */
|
||||
if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
|
||||
goto done;
|
||||
}
|
||||
did_flush = true;
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
tcg_flush_jmp_cache(cpu);
|
||||
}
|
||||
|
||||
qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
|
||||
page_flush_tb();
|
||||
|
||||
tcg_region_reset_all();
|
||||
/* XXX: flush processor icache at this point if cache flush is expensive */
|
||||
qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
|
||||
|
||||
done:
|
||||
mmap_unlock();
|
||||
if (did_flush) {
|
||||
qemu_plugin_flush_cb();
|
||||
}
|
||||
}
|
||||
|
||||
void tb_flush(CPUState *cpu)
|
||||
{
|
||||
if (tcg_enabled()) {
|
||||
unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
|
||||
|
||||
if (cpu_in_exclusive_context(cpu)) {
|
||||
do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
|
||||
} else {
|
||||
async_safe_run_on_cpu(cpu, do_tb_flush,
|
||||
RUN_ON_CPU_HOST_INT(tb_flush_count));
|
||||
}
|
||||
}
|
||||
}
|
||||
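Editor's note: tb_flush() above snapshots tb_flush_count and hands it to the real flush routine, which re-checks the counter so that several CPUs requesting a flush around the same time collapse into a single real flush. A small sketch of that generation-counter idea in plain C11 (no QEMU types; the deferral to a safe point is omitted):

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint flush_count;

/* The expensive operation; only runs if nobody else did it first. */
static void do_flush(unsigned requested_gen)
{
    if (atomic_load(&flush_count) != requested_gen) {
        return;                      /* someone already flushed: stale request */
    }
    puts("flushing...");
    atomic_fetch_add(&flush_count, 1);
}

static void request_flush(void)
{
    unsigned gen = atomic_load(&flush_count);
    do_flush(gen);                   /* QEMU defers this to a safe point */
}

int main(void)
{
    request_flush();                 /* performs the flush */
    do_flush(0);                     /* generation 0 is stale: no-op */
    return 0;
}
```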
|
||||
/*
|
||||
* user-mode: call with mmap_lock held
|
||||
* !user-mode: call with @pd->lock held
|
||||
*/
|
||||
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
|
||||
{
|
||||
TranslationBlock *tb1;
|
||||
uintptr_t *pprev;
|
||||
unsigned int n1;
|
||||
|
||||
assert_page_locked(pd);
|
||||
pprev = &pd->first_tb;
|
||||
PAGE_FOR_EACH_TB(pd, tb1, n1) {
|
||||
if (tb1 == tb) {
|
||||
*pprev = tb1->page_next[n1];
|
||||
return;
|
||||
}
|
||||
pprev = &tb1->page_next[n1];
|
||||
}
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
/* remove @orig from its @n_orig-th jump list */
|
||||
static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
|
||||
{
|
||||
uintptr_t ptr, ptr_locked;
|
||||
TranslationBlock *dest;
|
||||
TranslationBlock *tb;
|
||||
uintptr_t *pprev;
|
||||
int n;
|
||||
|
||||
/* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
|
||||
ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
|
||||
dest = (TranslationBlock *)(ptr & ~1);
|
||||
if (dest == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
qemu_spin_lock(&dest->jmp_lock);
|
||||
/*
|
||||
* While acquiring the lock, the jump might have been removed if the
|
||||
* destination TB was invalidated; check again.
|
||||
*/
|
||||
ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
|
||||
if (ptr_locked != ptr) {
|
||||
qemu_spin_unlock(&dest->jmp_lock);
|
||||
/*
|
||||
* The only possibility is that the jump was unlinked via
|
||||
* tb_jump_unlink(dest). Seeing here another destination would be a bug,
|
||||
* because we set the LSB above.
|
||||
*/
|
||||
g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
|
||||
return;
|
||||
}
|
||||
/*
|
||||
* We first acquired the lock, and since the destination pointer matches,
|
||||
* we know for sure that @orig is in the jmp list.
|
||||
*/
|
||||
pprev = &dest->jmp_list_head;
|
||||
TB_FOR_EACH_JMP(dest, tb, n) {
|
||||
if (tb == orig && n == n_orig) {
|
||||
*pprev = tb->jmp_list_next[n];
|
||||
/* no need to set orig->jmp_dest[n]; setting the LSB was enough */
|
||||
qemu_spin_unlock(&dest->jmp_lock);
|
||||
return;
|
||||
}
|
||||
pprev = &tb->jmp_list_next[n];
|
||||
}
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
/*
|
||||
* Reset the jump entry 'n' of a TB so that it is not chained to another TB.
|
||||
*/
|
||||
void tb_reset_jump(TranslationBlock *tb, int n)
|
||||
{
|
||||
uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
|
||||
tb_set_jmp_target(tb, n, addr);
|
||||
}
|
||||
|
||||
/* remove any jumps to the TB */
|
||||
static inline void tb_jmp_unlink(TranslationBlock *dest)
|
||||
{
|
||||
TranslationBlock *tb;
|
||||
int n;
|
||||
|
||||
qemu_spin_lock(&dest->jmp_lock);
|
||||
|
||||
TB_FOR_EACH_JMP(dest, tb, n) {
|
||||
tb_reset_jump(tb, n);
|
||||
qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
|
||||
/* No need to clear the list entry; setting the dest ptr is enough */
|
||||
}
|
||||
dest->jmp_list_head = (uintptr_t)NULL;
|
||||
|
||||
qemu_spin_unlock(&dest->jmp_lock);
|
||||
}
|
||||
|
||||
static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
|
||||
{
|
||||
CPUState *cpu;
|
||||
|
||||
if (TARGET_TB_PCREL) {
|
||||
/* A TB may be at any virtual address */
|
||||
CPU_FOREACH(cpu) {
|
||||
tcg_flush_jmp_cache(cpu);
|
||||
}
|
||||
} else {
|
||||
uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb));
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
CPUJumpCache *jc = cpu->tb_jmp_cache;
|
||||
|
||||
if (qatomic_read(&jc->array[h].tb) == tb) {
|
||||
qatomic_set(&jc->array[h].tb, NULL);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* In user-mode, call with mmap_lock held.
|
||||
* In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
|
||||
* locks held.
|
||||
*/
|
||||
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
|
||||
{
|
||||
PageDesc *p;
|
||||
uint32_t h;
|
||||
tb_page_addr_t phys_pc;
|
||||
uint32_t orig_cflags = tb_cflags(tb);
|
||||
|
||||
assert_memory_lock();
|
||||
|
||||
/* make sure no further incoming jumps will be chained to this TB */
|
||||
qemu_spin_lock(&tb->jmp_lock);
|
||||
qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
|
||||
qemu_spin_unlock(&tb->jmp_lock);
|
||||
|
||||
/* remove the TB from the hash list */
|
||||
phys_pc = tb_page_addr0(tb);
|
||||
h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
|
||||
tb->flags, orig_cflags, tb->trace_vcpu_dstate);
|
||||
if (!qht_remove(&tb_ctx.htable, tb, h)) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* remove the TB from the page list */
|
||||
if (rm_from_page_list) {
|
||||
p = page_find(phys_pc >> TARGET_PAGE_BITS);
|
||||
tb_page_remove(p, tb);
|
||||
phys_pc = tb_page_addr1(tb);
|
||||
if (phys_pc != -1) {
|
||||
p = page_find(phys_pc >> TARGET_PAGE_BITS);
|
||||
tb_page_remove(p, tb);
|
||||
}
|
||||
}
|
||||
|
||||
/* remove the TB from the hash list */
|
||||
tb_jmp_cache_inval_tb(tb);
|
||||
|
||||
/* suppress this TB from the two jump lists */
|
||||
tb_remove_from_jmp_list(tb, 0);
|
||||
tb_remove_from_jmp_list(tb, 1);
|
||||
|
||||
/* suppress any remaining jumps to this TB */
|
||||
tb_jmp_unlink(tb);
|
||||
|
||||
qatomic_set(&tb_ctx.tb_phys_invalidate_count,
|
||||
tb_ctx.tb_phys_invalidate_count + 1);
|
||||
}
|
||||
|
||||
static void tb_phys_invalidate__locked(TranslationBlock *tb)
|
||||
{
|
||||
qemu_thread_jit_write();
|
||||
do_tb_phys_invalidate(tb, true);
|
||||
qemu_thread_jit_execute();
|
||||
}
|
||||
|
||||
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
|
||||
PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
|
||||
{
|
||||
PageDesc *p1, *p2;
|
||||
tb_page_addr_t page1;
|
||||
tb_page_addr_t page2;
|
||||
|
||||
assert_memory_lock();
|
||||
g_assert(phys1 != -1);
|
||||
|
||||
page1 = phys1 >> TARGET_PAGE_BITS;
|
||||
page2 = phys2 >> TARGET_PAGE_BITS;
|
||||
|
||||
p1 = page_find_alloc(page1, alloc);
|
||||
if (ret_p1) {
|
||||
*ret_p1 = p1;
|
||||
}
|
||||
if (likely(phys2 == -1)) {
|
||||
page_lock(p1);
|
||||
return;
|
||||
} else if (page1 == page2) {
|
||||
page_lock(p1);
|
||||
if (ret_p2) {
|
||||
*ret_p2 = p1;
|
||||
}
|
||||
return;
|
||||
}
|
||||
p2 = page_find_alloc(page2, alloc);
|
||||
if (ret_p2) {
|
||||
*ret_p2 = p2;
|
||||
}
|
||||
if (page1 < page2) {
|
||||
page_lock(p1);
|
||||
page_lock(p2);
|
||||
} else {
|
||||
page_lock(p2);
|
||||
page_lock(p1);
|
||||
}
|
||||
}
|
||||
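Editor's note: page_lock_pair() above always takes the two page locks in ascending page-index order, so two threads locking overlapping pairs cannot deadlock. The same discipline in a self-contained form, with hypothetical types and pthread mutexes standing in for the spinlocks:

```c
#include <pthread.h>
#include <stdio.h>

struct page {
    unsigned long index;
    pthread_mutex_t lock;
};

/* Lock two pages in a global order (by index) to avoid ABBA deadlock. */
static void lock_page_pair(struct page *p1, struct page *p2)
{
    if (p1 == p2) {
        pthread_mutex_lock(&p1->lock);
    } else if (p1->index < p2->index) {
        pthread_mutex_lock(&p1->lock);
        pthread_mutex_lock(&p2->lock);
    } else {
        pthread_mutex_lock(&p2->lock);
        pthread_mutex_lock(&p1->lock);
    }
}

int main(void)
{
    struct page a = { 1, PTHREAD_MUTEX_INITIALIZER };
    struct page b = { 2, PTHREAD_MUTEX_INITIALIZER };
    lock_page_pair(&b, &a);          /* still locks a before b */
    puts("both pages locked");
    return 0;
}
```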
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
static inline void page_lock_tb(const TranslationBlock *tb) { }
|
||||
static inline void page_unlock_tb(const TranslationBlock *tb) { }
|
||||
#else
|
||||
/* lock the page(s) of a TB in the correct acquisition order */
|
||||
static void page_lock_tb(const TranslationBlock *tb)
|
||||
{
|
||||
page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false);
|
||||
}
|
||||
|
||||
static void page_unlock_tb(const TranslationBlock *tb)
|
||||
{
|
||||
PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS);
|
||||
|
||||
page_unlock(p1);
|
||||
if (unlikely(tb_page_addr1(tb) != -1)) {
|
||||
PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS);
|
||||
|
||||
if (p2 != p1) {
|
||||
page_unlock(p2);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Invalidate one TB.
|
||||
* Called with mmap_lock held in user-mode.
|
||||
*/
|
||||
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
|
||||
{
|
||||
if (page_addr == -1 && tb_page_addr0(tb) != -1) {
|
||||
page_lock_tb(tb);
|
||||
do_tb_phys_invalidate(tb, true);
|
||||
page_unlock_tb(tb);
|
||||
} else {
|
||||
do_tb_phys_invalidate(tb, false);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Add the tb in the target page and protect it if necessary.
|
||||
* Called with mmap_lock held for user-mode emulation.
|
||||
* Called with @p->lock held in !user-mode.
|
||||
*/
|
||||
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
|
||||
unsigned int n, tb_page_addr_t page_addr)
|
||||
{
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
bool page_already_protected;
|
||||
#endif
|
||||
|
||||
assert_page_locked(p);
|
||||
|
||||
tb->page_next[n] = p->first_tb;
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
page_already_protected = p->first_tb != (uintptr_t)NULL;
|
||||
#endif
|
||||
p->first_tb = (uintptr_t)tb | n;
|
||||
|
||||
#if defined(CONFIG_USER_ONLY)
|
||||
/* translator_loop() must have made all TB pages non-writable */
|
||||
assert(!(p->flags & PAGE_WRITE));
|
||||
#else
|
||||
/*
|
||||
* If some code is already present, then the pages are already
|
||||
* protected. So we handle the case where only the first TB is
|
||||
* allocated in a physical page.
|
||||
*/
|
||||
if (!page_already_protected) {
|
||||
tlb_protect_code(page_addr);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a new TB and link it to the physical page tables. phys_page2 is
|
||||
* (-1) to indicate that only one page contains the TB.
|
||||
*
|
||||
* Called with mmap_lock held for user-mode emulation.
|
||||
*
|
||||
* Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
|
||||
* Note that in !user-mode, another thread might have already added a TB
|
||||
* for the same block of guest code that @tb corresponds to. In that case,
|
||||
* the caller should discard the original @tb, and use instead the returned TB.
|
||||
*/
|
||||
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
|
||||
tb_page_addr_t phys_page2)
|
||||
{
|
||||
PageDesc *p;
|
||||
PageDesc *p2 = NULL;
|
||||
void *existing_tb = NULL;
|
||||
uint32_t h;
|
||||
|
||||
assert_memory_lock();
|
||||
tcg_debug_assert(!(tb->cflags & CF_INVALID));
|
||||
|
||||
/*
|
||||
* Add the TB to the page list, acquiring the pages' locks first.
|
||||
* We keep the locks held until after inserting the TB in the hash table,
|
||||
* so that if the insertion fails we know for sure that the TBs are still
|
||||
* in the page descriptors.
|
||||
* Note that inserting into the hash table first isn't an option, since
|
||||
* we can only insert TBs that are fully initialized.
|
||||
*/
|
||||
page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
|
||||
tb_page_add(p, tb, 0, phys_pc);
|
||||
if (p2) {
|
||||
tb_page_add(p2, tb, 1, phys_page2);
|
||||
}
|
||||
|
||||
/* add in the hash table */
|
||||
h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
|
||||
tb->flags, tb->cflags, tb->trace_vcpu_dstate);
|
||||
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
|
||||
|
||||
/* remove TB from the page(s) if we couldn't insert it */
|
||||
if (unlikely(existing_tb)) {
|
||||
tb_page_remove(p, tb);
|
||||
if (p2) {
|
||||
tb_page_remove(p2, tb);
|
||||
}
|
||||
tb = existing_tb;
|
||||
}
|
||||
|
||||
if (p2 && p2 != p) {
|
||||
page_unlock(p2);
|
||||
}
|
||||
page_unlock(p);
|
||||
return tb;
|
||||
}
|
||||
|
||||
/*
|
||||
* @p must be non-NULL.
|
||||
* user-mode: call with mmap_lock held.
|
||||
* !user-mode: call with all @pages locked.
|
||||
*/
|
||||
static void
|
||||
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
|
||||
PageDesc *p, tb_page_addr_t start,
|
||||
tb_page_addr_t end,
|
||||
uintptr_t retaddr)
|
||||
{
|
||||
TranslationBlock *tb;
|
||||
tb_page_addr_t tb_start, tb_end;
|
||||
int n;
|
||||
#ifdef TARGET_HAS_PRECISE_SMC
|
||||
CPUState *cpu = current_cpu;
|
||||
bool current_tb_not_found = retaddr != 0;
|
||||
bool current_tb_modified = false;
|
||||
TranslationBlock *current_tb = NULL;
|
||||
#endif /* TARGET_HAS_PRECISE_SMC */
|
||||
|
||||
assert_page_locked(p);
|
||||
|
||||
/*
|
||||
* We remove all the TBs in the range [start, end[.
|
||||
* XXX: see if in some cases it could be faster to invalidate all the code
|
||||
*/
|
||||
PAGE_FOR_EACH_TB(p, tb, n) {
|
||||
assert_page_locked(p);
|
||||
/* NOTE: this is subtle as a TB may span two physical pages */
|
||||
if (n == 0) {
|
||||
/* NOTE: tb_end may be after the end of the page, but
|
||||
it is not a problem */
|
||||
tb_start = tb_page_addr0(tb);
|
||||
tb_end = tb_start + tb->size;
|
||||
} else {
|
||||
tb_start = tb_page_addr1(tb);
|
||||
tb_end = tb_start + ((tb_page_addr0(tb) + tb->size)
|
||||
& ~TARGET_PAGE_MASK);
|
||||
}
|
||||
if (!(tb_end <= start || tb_start >= end)) {
|
||||
#ifdef TARGET_HAS_PRECISE_SMC
|
||||
if (current_tb_not_found) {
|
||||
current_tb_not_found = false;
|
||||
/* now we have a real cpu fault */
|
||||
current_tb = tcg_tb_lookup(retaddr);
|
||||
}
|
||||
if (current_tb == tb &&
|
||||
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
|
||||
/*
|
||||
* If we are modifying the current TB, we must stop
|
||||
* its execution. We could be more precise by checking
|
||||
* that the modification is after the current PC, but it
|
||||
* would require a specialized function to partially
|
||||
* restore the CPU state.
|
||||
*/
|
||||
current_tb_modified = true;
|
||||
cpu_restore_state_from_tb(cpu, current_tb, retaddr);
|
||||
}
|
||||
#endif /* TARGET_HAS_PRECISE_SMC */
|
||||
tb_phys_invalidate__locked(tb);
|
||||
}
|
||||
}
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
/* if no code remaining, no need to continue to use slow writes */
|
||||
if (!p->first_tb) {
|
||||
tlb_unprotect_code(start);
|
||||
}
|
||||
#endif
|
||||
#ifdef TARGET_HAS_PRECISE_SMC
|
||||
if (current_tb_modified) {
|
||||
page_collection_unlock(pages);
|
||||
/* Force execution of one insn next time. */
|
||||
cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
|
||||
mmap_unlock();
|
||||
cpu_loop_exit_noexc(cpu);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Invalidate all TBs which intersect with the target physical
|
||||
* address page @addr.
|
||||
*
|
||||
* Called with mmap_lock held for user-mode emulation
|
||||
*/
|
||||
void tb_invalidate_phys_page(tb_page_addr_t addr)
|
||||
{
|
||||
struct page_collection *pages;
|
||||
tb_page_addr_t start, end;
|
||||
PageDesc *p;
|
||||
|
||||
assert_memory_lock();
|
||||
|
||||
p = page_find(addr >> TARGET_PAGE_BITS);
|
||||
if (p == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
start = addr & TARGET_PAGE_MASK;
|
||||
end = start + TARGET_PAGE_SIZE;
|
||||
pages = page_collection_lock(start, end);
|
||||
tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
|
||||
page_collection_unlock(pages);
|
||||
}
|
||||
|
||||
/*
|
||||
* Invalidate all TBs which intersect with the target physical address range
|
||||
* [start;end[. NOTE: start and end may refer to *different* physical pages.
|
||||
* 'is_cpu_write_access' should be true if called from a real cpu write
|
||||
* access: the virtual CPU will exit the current TB if code is modified inside
|
||||
* this TB.
|
||||
*
|
||||
* Called with mmap_lock held for user-mode emulation.
|
||||
*/
|
||||
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
|
||||
{
|
||||
struct page_collection *pages;
|
||||
tb_page_addr_t next;
|
||||
|
||||
assert_memory_lock();
|
||||
|
||||
pages = page_collection_lock(start, end);
|
||||
for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
|
||||
start < end;
|
||||
start = next, next += TARGET_PAGE_SIZE) {
|
||||
PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
|
||||
tb_page_addr_t bound = MIN(next, end);
|
||||
|
||||
if (pd == NULL) {
|
||||
continue;
|
||||
}
|
||||
tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
|
||||
}
|
||||
page_collection_unlock(pages);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SOFTMMU
|
||||
/*
|
||||
* len must be <= 8 and start must be a multiple of len.
|
||||
* Called via softmmu_template.h when code areas are written to with
|
||||
* iothread mutex not held.
|
||||
*
|
||||
* Call with all @pages in the range [@start, @start + len[ locked.
|
||||
*/
|
||||
void tb_invalidate_phys_page_fast(struct page_collection *pages,
|
||||
tb_page_addr_t start, int len,
|
||||
uintptr_t retaddr)
|
||||
{
|
||||
PageDesc *p;
|
||||
|
||||
assert_memory_lock();
|
||||
|
||||
p = page_find(start >> TARGET_PAGE_BITS);
|
||||
if (!p) {
|
||||
return;
|
||||
}
|
||||
|
||||
assert_page_locked(p);
|
||||
tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
|
||||
retaddr);
|
||||
}
|
||||
#else
|
||||
/*
|
||||
* Called with mmap_lock held. If pc is not 0 then it indicates the
|
||||
* host PC of the faulting store instruction that caused this invalidate.
|
||||
* Returns true if the caller needs to abort execution of the current
|
||||
* TB (because it was modified by this store and the guest CPU has
|
||||
* precise-SMC semantics).
|
||||
*/
|
||||
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
|
||||
{
|
||||
TranslationBlock *tb;
|
||||
PageDesc *p;
|
||||
int n;
|
||||
#ifdef TARGET_HAS_PRECISE_SMC
|
||||
TranslationBlock *current_tb = NULL;
|
||||
CPUState *cpu = current_cpu;
|
||||
bool current_tb_modified = false;
|
||||
#endif
|
||||
|
||||
assert_memory_lock();
|
||||
|
||||
addr &= TARGET_PAGE_MASK;
|
||||
p = page_find(addr >> TARGET_PAGE_BITS);
|
||||
if (!p) {
|
||||
return false;
|
||||
}
|
||||
|
||||
#ifdef TARGET_HAS_PRECISE_SMC
|
||||
if (p->first_tb && pc != 0) {
|
||||
current_tb = tcg_tb_lookup(pc);
|
||||
}
|
||||
#endif
|
||||
assert_page_locked(p);
|
||||
PAGE_FOR_EACH_TB(p, tb, n) {
|
||||
#ifdef TARGET_HAS_PRECISE_SMC
|
||||
if (current_tb == tb &&
|
||||
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
|
||||
/*
|
||||
* If we are modifying the current TB, we must stop its execution.
|
||||
* We could be more precise by checking that the modification is
|
||||
* after the current PC, but it would require a specialized
|
||||
* function to partially restore the CPU state.
|
||||
*/
|
||||
current_tb_modified = true;
|
||||
cpu_restore_state_from_tb(cpu, current_tb, pc);
|
||||
}
|
||||
#endif /* TARGET_HAS_PRECISE_SMC */
|
||||
tb_phys_invalidate(tb, addr);
|
||||
}
|
||||
p->first_tb = (uintptr_t)NULL;
|
||||
#ifdef TARGET_HAS_PRECISE_SMC
|
||||
if (current_tb_modified) {
|
||||
/* Force execution of one insn next time. */
|
||||
cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
|
||||
return false;
|
||||
}
|
||||
#endif
|
@ -70,8 +70,6 @@ static void *mttcg_cpu_thread_fn(void *arg)
|
||||
assert(tcg_enabled());
|
||||
g_assert(!icount_enabled());
|
||||
|
||||
tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1);
|
||||
|
||||
rcu_register_thread();
|
||||
force_rcu.notifier.notify = mttcg_force_rcu;
|
||||
force_rcu.cpu = cpu;
|
||||
@ -151,6 +149,9 @@ void mttcg_start_vcpu_thread(CPUState *cpu)
|
||||
{
|
||||
char thread_name[VCPU_THREAD_NAME_SIZE];
|
||||
|
||||
g_assert(tcg_enabled());
|
||||
tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1);
|
||||
|
||||
cpu->thread = g_new0(QemuThread, 1);
|
||||
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
|
||||
qemu_cond_init(cpu->halt_cond);
|
||||
|
@ -51,7 +51,7 @@ void rr_kick_vcpu_thread(CPUState *unused)
|
||||
*
|
||||
* The kick timer is responsible for moving single threaded vCPU
|
||||
* emulation on to the next vCPU. If more than one vCPU is running a
|
||||
* timer event with force a cpu->exit so the next vCPU can get
|
||||
* timer event we force a cpu->exit so the next vCPU can get
|
||||
* scheduled.
|
||||
*
|
||||
* The timer is removed if all vCPUs are idle and restarted again once
|
||||
@ -257,9 +257,7 @@ static void *rr_cpu_thread_fn(void *arg)
|
||||
Notifier force_rcu;
|
||||
CPUState *cpu = arg;
|
||||
|
||||
g_assert(tcg_enabled());
|
||||
tcg_cpu_init_cflags(cpu, false);
|
||||
|
||||
assert(tcg_enabled());
|
||||
rcu_register_thread();
|
||||
force_rcu.notify = rr_force_rcu;
|
||||
rcu_add_force_rcu_notifier(&force_rcu);
|
||||
@ -394,6 +392,9 @@ void rr_start_vcpu_thread(CPUState *cpu)
|
||||
static QemuCond *single_tcg_halt_cond;
|
||||
static QemuThread *single_tcg_cpu_thread;
|
||||
|
||||
g_assert(tcg_enabled());
|
||||
tcg_cpu_init_cflags(cpu, false);
|
||||
|
||||
if (!single_tcg_cpu_thread) {
|
||||
cpu->thread = g_new0(QemuThread, 1);
|
||||
cpu->halt_cond = g_new0(QemuCond, 1);
|
||||
|
File diff suppressed because it is too large
@ -262,7 +262,7 @@ static void *translator_access(CPUArchState *env, DisasContextBase *db,
|
||||
tb = db->tb;
|
||||
|
||||
/* Use slow path if first page is MMIO. */
|
||||
if (unlikely(tb->page_addr[0] == -1)) {
|
||||
if (unlikely(tb_page_addr0(tb) == -1)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -274,13 +274,14 @@ static void *translator_access(CPUArchState *env, DisasContextBase *db,
|
||||
host = db->host_addr[1];
|
||||
base = TARGET_PAGE_ALIGN(db->pc_first);
|
||||
if (host == NULL) {
|
||||
tb->page_addr[1] =
|
||||
tb_page_addr_t phys_page =
|
||||
get_page_addr_code_hostp(env, base, &db->host_addr[1]);
|
||||
/* We cannot handle MMIO as second page. */
|
||||
assert(phys_page != -1);
|
||||
tb_set_page_addr1(tb, phys_page);
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
page_protect(end);
|
||||
#endif
|
||||
/* We cannot handle MMIO as second page. */
|
||||
assert(tb->page_addr[1] != -1);
|
||||
host = db->host_addr[1];
|
||||
}
|
||||
|
||||
|
@ -210,6 +210,48 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
|
||||
return addr;
|
||||
}
|
||||
|
||||
void page_reset_target_data(target_ulong start, target_ulong end)
|
||||
{
|
||||
#ifdef TARGET_PAGE_DATA_SIZE
|
||||
target_ulong addr, len;
|
||||
|
||||
/*
|
||||
* This function should never be called with addresses outside the
|
||||
* guest address space. If this assert fires, it probably indicates
|
||||
* a missing call to h2g_valid.
|
||||
*/
|
||||
assert(end - 1 <= GUEST_ADDR_MAX);
|
||||
assert(start < end);
|
||||
assert_memory_lock();
|
||||
|
||||
start = start & TARGET_PAGE_MASK;
|
||||
end = TARGET_PAGE_ALIGN(end);
|
||||
|
||||
for (addr = start, len = end - start;
|
||||
len != 0;
|
||||
len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
|
||||
PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
|
||||
|
||||
g_free(p->target_data);
|
||||
p->target_data = NULL;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef TARGET_PAGE_DATA_SIZE
|
||||
void *page_get_target_data(target_ulong address)
|
||||
{
|
||||
PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
|
||||
void *ret = p->target_data;
|
||||
|
||||
if (!ret) {
|
||||
ret = g_malloc0(TARGET_PAGE_DATA_SIZE);
|
||||
p->target_data = ret;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
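Editor's note: page_get_target_data() above allocates the per-page buffer on first use and keeps returning the same buffer afterwards, while page_reset_target_data() frees it for a whole range. A minimal lazy-allocation sketch of that idea, with illustrative names and plain calloc/free instead of glib:

```c
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define DATA_SIZE 64

struct page_desc {
    void *target_data;    /* NULL until first requested */
};

/* Return the page's data buffer, allocating it (zeroed) on first use. */
static void *page_data_get(struct page_desc *p)
{
    if (!p->target_data) {
        p->target_data = calloc(1, DATA_SIZE);
    }
    return p->target_data;
}

/* Drop the buffer, e.g. when the page's mapping is reset. */
static void page_data_reset(struct page_desc *p)
{
    free(p->target_data);
    p->target_data = NULL;
}

int main(void)
{
    struct page_desc pd = { NULL };
    memset(page_data_get(&pd), 0xff, DATA_SIZE);   /* allocates lazily */
    printf("same buffer: %d\n", page_data_get(&pd) == pd.target_data);
    page_data_reset(&pd);
    return 0;
}
```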
|
||||
/* The softmmu versions of these helpers are in cputlb.c. */
|
||||
|
||||
/*
|
||||
|
@ -73,20 +73,24 @@ void audio_driver_register(audio_driver *drv)
|
||||
audio_driver *audio_driver_lookup(const char *name)
|
||||
{
|
||||
struct audio_driver *d;
|
||||
Error *local_err = NULL;
|
||||
int rv;
|
||||
|
||||
QLIST_FOREACH(d, &audio_drivers, next) {
|
||||
if (strcmp(name, d->name) == 0) {
|
||||
return d;
|
||||
}
|
||||
}
|
||||
|
||||
audio_module_load_one(name);
|
||||
QLIST_FOREACH(d, &audio_drivers, next) {
|
||||
if (strcmp(name, d->name) == 0) {
|
||||
return d;
|
||||
rv = audio_module_load(name, &local_err);
|
||||
if (rv > 0) {
|
||||
QLIST_FOREACH(d, &audio_drivers, next) {
|
||||
if (strcmp(name, d->name) == 0) {
|
||||
return d;
|
||||
}
|
||||
}
|
||||
} else if (rv < 0) {
|
||||
error_report_err(local_err);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
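Editor's note: the audio_driver_lookup() change above scans the registered drivers, and only if the module loader reports that it actually loaded something (return value > 0) does it scan again; a negative return carries an error that gets reported. A generic "look up, load on miss, look up again" sketch with placeholder names (not QEMU's module API):

```c
#include <stdio.h>
#include <string.h>

struct driver { const char *name; };

static struct driver builtin[] = { { "none" }, { "wav" } };
static struct driver loaded;                 /* filled in by the fake loader */
static int have_loaded;

static struct driver *scan(const char *name)
{
    for (unsigned i = 0; i < sizeof(builtin) / sizeof(builtin[0]); i++) {
        if (strcmp(builtin[i].name, name) == 0) {
            return &builtin[i];
        }
    }
    if (have_loaded && strcmp(loaded.name, name) == 0) {
        return &loaded;
    }
    return NULL;
}

/* Pretend module loader: >0 loaded, 0 not found, <0 error. */
static int module_load(const char *name)
{
    if (strcmp(name, "sndio") == 0) {
        loaded.name = "sndio";
        have_loaded = 1;
        return 1;
    }
    return 0;
}

static struct driver *driver_lookup(const char *name)
{
    struct driver *d = scan(name);
    if (d) {
        return d;
    }
    int rv = module_load(name);
    if (rv > 0) {
        return scan(name);           /* module registered itself: rescan */
    } else if (rv < 0) {
        fprintf(stderr, "module load failed for %s\n", name);
    }
    return NULL;
}

int main(void)
{
    printf("%s\n", driver_lookup("sndio") ? "found" : "missing");
    return 0;
}
```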
|
||||
|
@ -355,42 +355,62 @@ static int cryptodev_builtin_create_akcipher_session(
|
||||
return index;
|
||||
}
|
||||
|
||||
static int64_t cryptodev_builtin_create_session(
|
||||
static int cryptodev_builtin_create_session(
|
||||
CryptoDevBackend *backend,
|
||||
CryptoDevBackendSessionInfo *sess_info,
|
||||
uint32_t queue_index, Error **errp)
|
||||
uint32_t queue_index,
|
||||
CryptoDevCompletionFunc cb,
|
||||
void *opaque)
|
||||
{
|
||||
CryptoDevBackendBuiltin *builtin =
|
||||
CRYPTODEV_BACKEND_BUILTIN(backend);
|
||||
CryptoDevBackendSymSessionInfo *sym_sess_info;
|
||||
CryptoDevBackendAsymSessionInfo *asym_sess_info;
|
||||
int ret, status;
|
||||
Error *local_error = NULL;
|
||||
|
||||
switch (sess_info->op_code) {
|
||||
case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
|
||||
sym_sess_info = &sess_info->u.sym_sess_info;
|
||||
return cryptodev_builtin_create_cipher_session(
|
||||
builtin, sym_sess_info, errp);
|
||||
ret = cryptodev_builtin_create_cipher_session(
|
||||
builtin, sym_sess_info, &local_error);
|
||||
break;
|
||||
|
||||
case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION:
|
||||
asym_sess_info = &sess_info->u.asym_sess_info;
|
||||
return cryptodev_builtin_create_akcipher_session(
|
||||
builtin, asym_sess_info, errp);
|
||||
ret = cryptodev_builtin_create_akcipher_session(
|
||||
builtin, asym_sess_info, &local_error);
|
||||
break;
|
||||
|
||||
case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
|
||||
case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
|
||||
default:
|
||||
error_setg(errp, "Unsupported opcode :%" PRIu32 "",
|
||||
error_setg(&local_error, "Unsupported opcode :%" PRIu32 "",
|
||||
sess_info->op_code);
|
||||
return -1;
|
||||
return -VIRTIO_CRYPTO_NOTSUPP;
|
||||
}
|
||||
|
||||
return -1;
|
||||
if (local_error) {
|
||||
error_report_err(local_error);
|
||||
}
|
||||
if (ret < 0) {
|
||||
status = -VIRTIO_CRYPTO_ERR;
|
||||
} else {
|
||||
sess_info->session_id = ret;
|
||||
status = VIRTIO_CRYPTO_OK;
|
||||
}
|
||||
if (cb) {
|
||||
cb(opaque, status);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
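Editor's note: in this series the backend entry points stop returning a status synchronously; they take a completion callback plus an opaque pointer, report the operation's status through that callback, and return 0 to mean "request accepted". A hedged, stand-alone sketch of that completion-callback convention; the types and status codes are placeholders, not the virtio-crypto ones.

```c
#include <stdio.h>

typedef void (*completion_fn)(void *opaque, int status);

enum { OP_OK = 0, OP_ERR = -1 };

/* Accepts the request, does the work, and reports status via callback. */
static int backend_do_op(int input, completion_fn cb, void *opaque)
{
    int status = (input >= 0) ? OP_OK : OP_ERR;

    if (cb) {
        cb(opaque, status);          /* completion may also be deferred */
    }
    return 0;                        /* 0 = request was queued/handled */
}

static void on_done(void *opaque, int status)
{
    printf("%s finished with status %d\n", (const char *)opaque, status);
}

int main(void)
{
    backend_do_op(42, on_done, "op1");
    backend_do_op(-1, on_done, "op2");
    return 0;
}
```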
static int cryptodev_builtin_close_session(
|
||||
CryptoDevBackend *backend,
|
||||
uint64_t session_id,
|
||||
uint32_t queue_index, Error **errp)
|
||||
uint32_t queue_index,
|
||||
CryptoDevCompletionFunc cb,
|
||||
void *opaque)
|
||||
{
|
||||
CryptoDevBackendBuiltin *builtin =
|
||||
CRYPTODEV_BACKEND_BUILTIN(backend);
|
||||
@ -407,6 +427,9 @@ static int cryptodev_builtin_close_session(
|
||||
|
||||
g_free(session);
|
||||
builtin->sessions[session_id] = NULL;
|
||||
if (cb) {
|
||||
cb(opaque, VIRTIO_CRYPTO_OK);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -506,7 +529,9 @@ static int cryptodev_builtin_asym_operation(
|
||||
static int cryptodev_builtin_operation(
|
||||
CryptoDevBackend *backend,
|
||||
CryptoDevBackendOpInfo *op_info,
|
||||
uint32_t queue_index, Error **errp)
|
||||
uint32_t queue_index,
|
||||
CryptoDevCompletionFunc cb,
|
||||
void *opaque)
|
||||
{
|
||||
CryptoDevBackendBuiltin *builtin =
|
||||
CRYPTODEV_BACKEND_BUILTIN(backend);
|
||||
@ -514,11 +539,12 @@ static int cryptodev_builtin_operation(
|
||||
CryptoDevBackendSymOpInfo *sym_op_info;
|
||||
CryptoDevBackendAsymOpInfo *asym_op_info;
|
||||
enum CryptoDevBackendAlgType algtype = op_info->algtype;
|
||||
int ret = -VIRTIO_CRYPTO_ERR;
|
||||
int status = -VIRTIO_CRYPTO_ERR;
|
||||
Error *local_error = NULL;
|
||||
|
||||
if (op_info->session_id >= MAX_NUM_SESSIONS ||
|
||||
builtin->sessions[op_info->session_id] == NULL) {
|
||||
error_setg(errp, "Cannot find a valid session id: %" PRIu64 "",
|
||||
error_setg(&local_error, "Cannot find a valid session id: %" PRIu64 "",
|
||||
op_info->session_id);
|
||||
return -VIRTIO_CRYPTO_INVSESS;
|
||||
}
|
||||
@ -526,14 +552,21 @@ static int cryptodev_builtin_operation(
|
||||
sess = builtin->sessions[op_info->session_id];
|
||||
if (algtype == CRYPTODEV_BACKEND_ALG_SYM) {
|
||||
sym_op_info = op_info->u.sym_op_info;
|
||||
ret = cryptodev_builtin_sym_operation(sess, sym_op_info, errp);
|
||||
status = cryptodev_builtin_sym_operation(sess, sym_op_info,
|
||||
&local_error);
|
||||
} else if (algtype == CRYPTODEV_BACKEND_ALG_ASYM) {
|
||||
asym_op_info = op_info->u.asym_op_info;
|
||||
ret = cryptodev_builtin_asym_operation(sess, op_info->op_code,
|
||||
asym_op_info, errp);
|
||||
status = cryptodev_builtin_asym_operation(sess, op_info->op_code,
|
||||
asym_op_info, &local_error);
|
||||
}
|
||||
|
||||
return ret;
|
||||
if (local_error) {
|
||||
error_report_err(local_error);
|
||||
}
|
||||
if (cb) {
|
||||
cb(opaque, status);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cryptodev_builtin_cleanup(
|
||||
@ -548,7 +581,7 @@ static void cryptodev_builtin_cleanup(
|
||||
|
||||
for (i = 0; i < MAX_NUM_SESSIONS; i++) {
|
||||
if (builtin->sessions[i] != NULL) {
|
||||
cryptodev_builtin_close_session(backend, i, 0, &error_abort);
|
||||
cryptodev_builtin_close_session(backend, i, 0, NULL, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
|
backends/cryptodev-lkcf.c (new file, 645 lines)
@ -0,0 +1,645 @@
|
||||
/*
|
||||
* QEMU Cryptodev backend for QEMU cipher APIs
|
||||
*
|
||||
* Copyright (c) 2022 Bytedance.Inc
|
||||
*
|
||||
* Authors:
|
||||
* lei he <helei.sig11@bytedance.com>
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "crypto/cipher.h"
|
||||
#include "crypto/akcipher.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "qemu/thread.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/queue.h"
|
||||
#include "qom/object.h"
|
||||
#include "sysemu/cryptodev.h"
|
||||
#include "standard-headers/linux/virtio_crypto.h"
|
||||
|
||||
#include <keyutils.h>
|
||||
#include <sys/eventfd.h>
|
||||
|
||||
/**
|
||||
* @TYPE_CRYPTODEV_BACKEND_LKCF:
|
||||
* name of backend that uses linux kernel crypto framework
|
||||
*/
|
||||
#define TYPE_CRYPTODEV_BACKEND_LKCF "cryptodev-backend-lkcf"
|
||||
|
||||
OBJECT_DECLARE_SIMPLE_TYPE(CryptoDevBackendLKCF, CRYPTODEV_BACKEND_LKCF)
|
||||
|
||||
#define INVALID_KEY_ID -1
|
||||
#define MAX_SESSIONS 256
|
||||
#define NR_WORKER_THREAD 64
|
||||
|
||||
#define KCTL_KEY_TYPE_PKEY "asymmetric"
|
||||
/**
|
||||
* Here the key is uploaded to the thread-keyring of worker thread, at least
|
||||
* until linux-6.0:
|
||||
* 1. process keyring seems to behave unexpectedly if main-thread does not
|
||||
* create the keyring before creating any other thread.
|
||||
* 2. at present, the guest kernel never perform multiple operations on a
|
||||
* session.
|
||||
* 3. it can reduce the load of the main-loop because the key passed by the
|
||||
* guest kernel has been already checked.
|
||||
*/
|
||||
#define KCTL_KEY_RING KEY_SPEC_THREAD_KEYRING
|
||||
|
||||
typedef struct CryptoDevBackendLKCFSession {
|
||||
uint8_t *key;
|
||||
size_t keylen;
|
||||
QCryptoAkCipherKeyType keytype;
|
||||
QCryptoAkCipherOptions akcipher_opts;
|
||||
} CryptoDevBackendLKCFSession;
|
||||
|
||||
typedef struct CryptoDevBackendLKCF CryptoDevBackendLKCF;
|
||||
typedef struct CryptoDevLKCFTask CryptoDevLKCFTask;
|
||||
struct CryptoDevLKCFTask {
|
||||
CryptoDevBackendLKCFSession *sess;
|
||||
CryptoDevBackendOpInfo *op_info;
|
||||
CryptoDevCompletionFunc cb;
|
||||
void *opaque;
|
||||
int status;
|
||||
CryptoDevBackendLKCF *lkcf;
|
||||
QSIMPLEQ_ENTRY(CryptoDevLKCFTask) queue;
|
||||
};
|
||||
|
||||
typedef struct CryptoDevBackendLKCF {
|
||||
CryptoDevBackend parent_obj;
|
||||
CryptoDevBackendLKCFSession *sess[MAX_SESSIONS];
|
||||
QSIMPLEQ_HEAD(, CryptoDevLKCFTask) requests;
|
||||
QSIMPLEQ_HEAD(, CryptoDevLKCFTask) responses;
|
||||
QemuMutex mutex;
|
||||
QemuCond cond;
|
||||
QemuMutex rsp_mutex;
|
||||
|
||||
/**
|
||||
* There is no async interface for asymmetric keys like AF_ALG sockets,
|
||||
* we don't seem to have a better way than creating a lot of threads.
|
||||
*/
|
||||
QemuThread worker_threads[NR_WORKER_THREAD];
|
||||
bool running;
|
||||
int eventfd;
|
||||
} CryptoDevBackendLKCF;
|
||||
|
||||
static void *cryptodev_lkcf_worker(void *arg);
|
||||
static int cryptodev_lkcf_close_session(CryptoDevBackend *backend,
|
||||
uint64_t session_id,
|
||||
uint32_t queue_index,
|
||||
CryptoDevCompletionFunc cb,
|
||||
void *opaque);
|
||||
|
||||
static void cryptodev_lkcf_handle_response(void *opaque)
{
    CryptoDevBackendLKCF *lkcf = (CryptoDevBackendLKCF *)opaque;
    QSIMPLEQ_HEAD(, CryptoDevLKCFTask) responses;
    CryptoDevLKCFTask *task, *next;
    eventfd_t nevent;

    QSIMPLEQ_INIT(&responses);
    eventfd_read(lkcf->eventfd, &nevent);

    qemu_mutex_lock(&lkcf->rsp_mutex);
    QSIMPLEQ_PREPEND(&responses, &lkcf->responses);
    qemu_mutex_unlock(&lkcf->rsp_mutex);

    QSIMPLEQ_FOREACH_SAFE(task, &responses, queue, next) {
        if (task->cb) {
            task->cb(task->opaque, task->status);
        }
        g_free(task);
    }
}

static int cryptodev_lkcf_set_op_desc(QCryptoAkCipherOptions *opts,
|
||||
char *key_desc,
|
||||
size_t desc_len,
|
||||
Error **errp)
|
||||
{
|
||||
QCryptoAkCipherOptionsRSA *rsa_opt;
|
||||
if (opts->alg != QCRYPTO_AKCIPHER_ALG_RSA) {
|
||||
error_setg(errp, "Unsupported alg: %u", opts->alg);
|
||||
return -1;
|
||||
}
|
||||
|
||||
rsa_opt = &opts->u.rsa;
|
||||
if (rsa_opt->padding_alg == QCRYPTO_RSA_PADDING_ALG_PKCS1) {
|
||||
snprintf(key_desc, desc_len, "enc=%s hash=%s",
|
||||
QCryptoRSAPaddingAlgorithm_str(rsa_opt->padding_alg),
|
||||
QCryptoHashAlgorithm_str(rsa_opt->hash_alg));
|
||||
|
||||
} else {
|
||||
snprintf(key_desc, desc_len, "enc=%s",
|
||||
QCryptoRSAPaddingAlgorithm_str(rsa_opt->padding_alg));
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cryptodev_lkcf_set_rsa_opt(int virtio_padding_alg,
|
||||
int virtio_hash_alg,
|
||||
QCryptoAkCipherOptionsRSA *opt,
|
||||
Error **errp)
|
||||
{
|
||||
if (virtio_padding_alg == VIRTIO_CRYPTO_RSA_PKCS1_PADDING) {
|
||||
opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1;
|
||||
|
||||
switch (virtio_hash_alg) {
|
||||
case VIRTIO_CRYPTO_RSA_MD5:
|
||||
opt->hash_alg = QCRYPTO_HASH_ALG_MD5;
|
||||
break;
|
||||
|
||||
case VIRTIO_CRYPTO_RSA_SHA1:
|
||||
opt->hash_alg = QCRYPTO_HASH_ALG_SHA1;
|
||||
break;
|
||||
|
||||
case VIRTIO_CRYPTO_RSA_SHA256:
|
||||
opt->hash_alg = QCRYPTO_HASH_ALG_SHA256;
|
||||
break;
|
||||
|
||||
case VIRTIO_CRYPTO_RSA_SHA512:
|
||||
opt->hash_alg = QCRYPTO_HASH_ALG_SHA512;
|
||||
break;
|
||||
|
||||
default:
|
||||
error_setg(errp, "Unsupported rsa hash algo: %d", virtio_hash_alg);
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (virtio_padding_alg == VIRTIO_CRYPTO_RSA_RAW_PADDING) {
|
||||
opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW;
|
||||
return 0;
|
||||
}
|
||||
|
||||
error_setg(errp, "Unsupported rsa padding algo: %u", virtio_padding_alg);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int cryptodev_lkcf_get_unused_session_index(CryptoDevBackendLKCF *lkcf)
|
||||
{
|
||||
size_t i;
|
||||
|
||||
for (i = 0; i < MAX_SESSIONS; i++) {
|
||||
if (lkcf->sess[i] == NULL) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void cryptodev_lkcf_init(CryptoDevBackend *backend, Error **errp)
|
||||
{
|
||||
/* Only support one queue */
|
||||
int queues = backend->conf.peers.queues, i;
|
||||
CryptoDevBackendClient *cc;
|
||||
CryptoDevBackendLKCF *lkcf =
|
||||
CRYPTODEV_BACKEND_LKCF(backend);
|
||||
|
||||
    if (queues != 1) {
        error_setg(errp,
                   "Only support one queue in cryptodev-lkcf backend");
        return;
    }
lkcf->eventfd = eventfd(0, 0);
|
||||
if (lkcf->eventfd < 0) {
|
||||
error_setg(errp, "Failed to create eventfd: %d", errno);
|
||||
return;
|
||||
}
|
||||
|
||||
cc = cryptodev_backend_new_client("cryptodev-lkcf", NULL);
|
||||
cc->info_str = g_strdup_printf("cryptodev-lkcf0");
|
||||
cc->queue_index = 0;
|
||||
cc->type = CRYPTODEV_BACKEND_TYPE_LKCF;
|
||||
backend->conf.peers.ccs[0] = cc;
|
||||
|
||||
backend->conf.crypto_services =
|
||||
1u << VIRTIO_CRYPTO_SERVICE_AKCIPHER;
|
||||
backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA;
|
||||
lkcf->running = true;
|
||||
|
||||
QSIMPLEQ_INIT(&lkcf->requests);
|
||||
QSIMPLEQ_INIT(&lkcf->responses);
|
||||
qemu_mutex_init(&lkcf->mutex);
|
||||
qemu_mutex_init(&lkcf->rsp_mutex);
|
||||
qemu_cond_init(&lkcf->cond);
|
||||
for (i = 0; i < NR_WORKER_THREAD; i++) {
|
||||
qemu_thread_create(&lkcf->worker_threads[i], "lkcf-worker",
|
||||
cryptodev_lkcf_worker, lkcf, 0);
|
||||
}
|
||||
qemu_set_fd_handler(
|
||||
lkcf->eventfd, cryptodev_lkcf_handle_response, NULL, lkcf);
|
||||
cryptodev_backend_set_ready(backend, true);
|
||||
}
|
||||
|
||||
static void cryptodev_lkcf_cleanup(CryptoDevBackend *backend, Error **errp)
|
||||
{
|
||||
CryptoDevBackendLKCF *lkcf = CRYPTODEV_BACKEND_LKCF(backend);
|
||||
size_t i;
|
||||
int queues = backend->conf.peers.queues;
|
||||
CryptoDevBackendClient *cc;
|
||||
CryptoDevLKCFTask *task, *next;
|
||||
|
||||
qemu_mutex_lock(&lkcf->mutex);
|
||||
lkcf->running = false;
|
||||
qemu_mutex_unlock(&lkcf->mutex);
|
||||
qemu_cond_broadcast(&lkcf->cond);
|
||||
|
||||
close(lkcf->eventfd);
|
||||
for (i = 0; i < NR_WORKER_THREAD; i++) {
|
||||
qemu_thread_join(&lkcf->worker_threads[i]);
|
||||
}
|
||||
|
||||
QSIMPLEQ_FOREACH_SAFE(task, &lkcf->requests, queue, next) {
|
||||
if (task->cb) {
|
||||
task->cb(task->opaque, task->status);
|
||||
}
|
||||
g_free(task);
|
||||
}
|
||||
|
||||
QSIMPLEQ_FOREACH_SAFE(task, &lkcf->responses, queue, next) {
|
||||
if (task->cb) {
|
||||
task->cb(task->opaque, task->status);
|
||||
}
|
||||
g_free(task);
|
||||
}
|
||||
|
||||
qemu_mutex_destroy(&lkcf->mutex);
|
||||
qemu_cond_destroy(&lkcf->cond);
|
||||
qemu_mutex_destroy(&lkcf->rsp_mutex);
|
||||
|
||||
for (i = 0; i < MAX_SESSIONS; i++) {
|
||||
if (lkcf->sess[i] != NULL) {
|
||||
cryptodev_lkcf_close_session(backend, i, 0, NULL, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < queues; i++) {
|
||||
cc = backend->conf.peers.ccs[i];
|
||||
if (cc) {
|
||||
cryptodev_backend_free_client(cc);
|
||||
backend->conf.peers.ccs[i] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
cryptodev_backend_set_ready(backend, false);
|
||||
}
|
||||
|
||||
static void cryptodev_lkcf_execute_task(CryptoDevLKCFTask *task)
|
||||
{
|
||||
CryptoDevBackendLKCFSession *session = task->sess;
|
||||
CryptoDevBackendAsymOpInfo *asym_op_info;
|
||||
bool kick = false;
|
||||
int ret, status, op_code = task->op_info->op_code;
|
||||
size_t p8info_len;
|
||||
g_autofree uint8_t *p8info = NULL;
|
||||
Error *local_error = NULL;
|
||||
key_serial_t key_id = INVALID_KEY_ID;
|
||||
char op_desc[64];
|
||||
g_autoptr(QCryptoAkCipher) akcipher = NULL;
|
||||
|
||||
    /**
     * We only offload private-key sessions:
     * 1. currently, the Linux kernel can only accept a public key wrapped
     *    in an X.509 certificate, and unfortunately the cost of making a
     *    certificate from a public key is too high.
     * 2. generally, public-key computation is fast, so it is simply done
     *    with the thread pool.
     */
if (session->keytype == QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE) {
|
||||
if (qcrypto_akcipher_export_p8info(&session->akcipher_opts,
|
||||
session->key, session->keylen,
|
||||
&p8info, &p8info_len,
|
||||
&local_error) != 0 ||
|
||||
cryptodev_lkcf_set_op_desc(&session->akcipher_opts, op_desc,
|
||||
sizeof(op_desc), &local_error) != 0) {
|
||||
error_report_err(local_error);
|
||||
} else {
|
||||
key_id = add_key(KCTL_KEY_TYPE_PKEY, "lkcf-backend-priv-key",
|
||||
p8info, p8info_len, KCTL_KEY_RING);
|
||||
}
|
||||
}
|
||||
|
||||
if (key_id < 0) {
|
||||
if (!qcrypto_akcipher_supports(&session->akcipher_opts)) {
|
||||
status = -VIRTIO_CRYPTO_NOTSUPP;
|
||||
goto out;
|
||||
}
|
||||
akcipher = qcrypto_akcipher_new(&session->akcipher_opts,
|
||||
session->keytype,
|
||||
session->key, session->keylen,
|
||||
&local_error);
|
||||
if (!akcipher) {
|
||||
status = -VIRTIO_CRYPTO_ERR;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
asym_op_info = task->op_info->u.asym_op_info;
|
||||
switch (op_code) {
|
||||
case VIRTIO_CRYPTO_AKCIPHER_ENCRYPT:
|
||||
if (key_id >= 0) {
|
||||
ret = keyctl_pkey_encrypt(key_id, op_desc,
|
||||
asym_op_info->src, asym_op_info->src_len,
|
||||
asym_op_info->dst, asym_op_info->dst_len);
|
||||
} else {
|
||||
ret = qcrypto_akcipher_encrypt(akcipher,
|
||||
asym_op_info->src, asym_op_info->src_len,
|
||||
asym_op_info->dst, asym_op_info->dst_len, &local_error);
|
||||
}
|
||||
break;
|
||||
|
||||
case VIRTIO_CRYPTO_AKCIPHER_DECRYPT:
|
||||
if (key_id >= 0) {
|
||||
ret = keyctl_pkey_decrypt(key_id, op_desc,
|
||||
asym_op_info->src, asym_op_info->src_len,
|
||||
asym_op_info->dst, asym_op_info->dst_len);
|
||||
} else {
|
||||
ret = qcrypto_akcipher_decrypt(akcipher,
|
||||
asym_op_info->src, asym_op_info->src_len,
|
||||
asym_op_info->dst, asym_op_info->dst_len, &local_error);
|
||||
}
|
||||
break;
|
||||
|
||||
case VIRTIO_CRYPTO_AKCIPHER_SIGN:
|
||||
if (key_id >= 0) {
|
||||
ret = keyctl_pkey_sign(key_id, op_desc,
|
||||
asym_op_info->src, asym_op_info->src_len,
|
||||
asym_op_info->dst, asym_op_info->dst_len);
|
||||
} else {
|
||||
ret = qcrypto_akcipher_sign(akcipher,
|
||||
asym_op_info->src, asym_op_info->src_len,
|
||||
asym_op_info->dst, asym_op_info->dst_len, &local_error);
|
||||
}
|
||||
break;
|
||||
|
||||
case VIRTIO_CRYPTO_AKCIPHER_VERIFY:
|
||||
if (key_id >= 0) {
|
||||
ret = keyctl_pkey_verify(key_id, op_desc,
|
||||
asym_op_info->src, asym_op_info->src_len,
|
||||
asym_op_info->dst, asym_op_info->dst_len);
|
||||
} else {
|
||||
ret = qcrypto_akcipher_verify(akcipher,
|
||||
asym_op_info->src, asym_op_info->src_len,
|
||||
asym_op_info->dst, asym_op_info->dst_len, &local_error);
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
error_setg(&local_error, "Unknown opcode: %u", op_code);
|
||||
status = -VIRTIO_CRYPTO_ERR;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
if (!local_error) {
|
||||
if (errno != EKEYREJECTED) {
|
||||
error_report("Failed do operation with keyctl: %d", errno);
|
||||
}
|
||||
} else {
|
||||
error_report_err(local_error);
|
||||
}
|
||||
status = op_code == VIRTIO_CRYPTO_AKCIPHER_VERIFY ?
|
||||
-VIRTIO_CRYPTO_KEY_REJECTED : -VIRTIO_CRYPTO_ERR;
|
||||
} else {
|
||||
status = VIRTIO_CRYPTO_OK;
|
||||
asym_op_info->dst_len = ret;
|
||||
}
|
||||
|
||||
out:
|
||||
if (key_id >= 0) {
|
||||
keyctl_unlink(key_id, KCTL_KEY_RING);
|
||||
}
|
||||
task->status = status;
|
||||
|
||||
qemu_mutex_lock(&task->lkcf->rsp_mutex);
|
||||
if (QSIMPLEQ_EMPTY(&task->lkcf->responses)) {
|
||||
kick = true;
|
||||
}
|
||||
QSIMPLEQ_INSERT_TAIL(&task->lkcf->responses, task, queue);
|
||||
qemu_mutex_unlock(&task->lkcf->rsp_mutex);
|
||||
|
||||
if (kick) {
|
||||
eventfd_write(task->lkcf->eventfd, 1);
|
||||
}
|
||||
}
|
||||
|
||||
static void *cryptodev_lkcf_worker(void *arg)
|
||||
{
|
||||
CryptoDevBackendLKCF *backend = (CryptoDevBackendLKCF *)arg;
|
||||
CryptoDevLKCFTask *task;
|
||||
|
||||
for (;;) {
|
||||
task = NULL;
|
||||
qemu_mutex_lock(&backend->mutex);
|
||||
while (backend->running && QSIMPLEQ_EMPTY(&backend->requests)) {
|
||||
qemu_cond_wait(&backend->cond, &backend->mutex);
|
||||
}
|
||||
if (backend->running) {
|
||||
task = QSIMPLEQ_FIRST(&backend->requests);
|
||||
QSIMPLEQ_REMOVE_HEAD(&backend->requests, queue);
|
||||
}
|
||||
qemu_mutex_unlock(&backend->mutex);
|
||||
|
||||
/* stopped */
|
||||
if (!task) {
|
||||
break;
|
||||
}
|
||||
cryptodev_lkcf_execute_task(task);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int cryptodev_lkcf_operation(
|
||||
CryptoDevBackend *backend,
|
||||
CryptoDevBackendOpInfo *op_info,
|
||||
uint32_t queue_index,
|
||||
CryptoDevCompletionFunc cb,
|
||||
void *opaque)
|
||||
{
|
||||
CryptoDevBackendLKCF *lkcf =
|
||||
CRYPTODEV_BACKEND_LKCF(backend);
|
||||
CryptoDevBackendLKCFSession *sess;
|
||||
enum CryptoDevBackendAlgType algtype = op_info->algtype;
|
||||
CryptoDevLKCFTask *task;
|
||||
|
||||
if (op_info->session_id >= MAX_SESSIONS ||
|
||||
lkcf->sess[op_info->session_id] == NULL) {
|
||||
error_report("Cannot find a valid session id: %" PRIu64 "",
|
||||
op_info->session_id);
|
||||
return -VIRTIO_CRYPTO_INVSESS;
|
||||
}
|
||||
|
||||
sess = lkcf->sess[op_info->session_id];
|
||||
if (algtype != CRYPTODEV_BACKEND_ALG_ASYM) {
|
||||
error_report("algtype not supported: %u", algtype);
|
||||
return -VIRTIO_CRYPTO_NOTSUPP;
|
||||
}
|
||||
|
||||
task = g_new0(CryptoDevLKCFTask, 1);
|
||||
task->op_info = op_info;
|
||||
task->cb = cb;
|
||||
task->opaque = opaque;
|
||||
task->sess = sess;
|
||||
task->lkcf = lkcf;
|
||||
task->status = -VIRTIO_CRYPTO_ERR;
|
||||
|
||||
qemu_mutex_lock(&lkcf->mutex);
|
||||
QSIMPLEQ_INSERT_TAIL(&lkcf->requests, task, queue);
|
||||
qemu_mutex_unlock(&lkcf->mutex);
|
||||
qemu_cond_signal(&lkcf->cond);
|
||||
|
||||
return VIRTIO_CRYPTO_OK;
|
||||
}
|
||||
|
||||
static int cryptodev_lkcf_create_asym_session(
|
||||
CryptoDevBackendLKCF *lkcf,
|
||||
CryptoDevBackendAsymSessionInfo *sess_info,
|
||||
uint64_t *session_id)
|
||||
{
|
||||
Error *local_error = NULL;
|
||||
int index;
|
||||
g_autofree CryptoDevBackendLKCFSession *sess =
|
||||
g_new0(CryptoDevBackendLKCFSession, 1);
|
||||
|
||||
switch (sess_info->algo) {
|
||||
case VIRTIO_CRYPTO_AKCIPHER_RSA:
|
||||
sess->akcipher_opts.alg = QCRYPTO_AKCIPHER_ALG_RSA;
|
||||
if (cryptodev_lkcf_set_rsa_opt(
|
||||
sess_info->u.rsa.padding_algo, sess_info->u.rsa.hash_algo,
|
||||
&sess->akcipher_opts.u.rsa, &local_error) != 0) {
|
||||
error_report_err(local_error);
|
||||
return -VIRTIO_CRYPTO_ERR;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
error_report("Unsupported asym alg %u", sess_info->algo);
|
||||
return -VIRTIO_CRYPTO_NOTSUPP;
|
||||
}
|
||||
|
||||
switch (sess_info->keytype) {
|
||||
case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC:
|
||||
sess->keytype = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
|
||||
break;
|
||||
|
||||
case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE:
|
||||
sess->keytype = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
|
||||
break;
|
||||
|
||||
default:
|
||||
error_report("Unknown akcipher keytype: %u", sess_info->keytype);
|
||||
return -VIRTIO_CRYPTO_ERR;
|
||||
}
|
||||
|
||||
index = cryptodev_lkcf_get_unused_session_index(lkcf);
|
||||
if (index < 0) {
|
||||
error_report("Total number of sessions created exceeds %u",
|
||||
MAX_SESSIONS);
|
||||
return -VIRTIO_CRYPTO_ERR;
|
||||
}
|
||||
|
||||
sess->keylen = sess_info->keylen;
|
||||
sess->key = g_malloc(sess_info->keylen);
|
||||
memcpy(sess->key, sess_info->key, sess_info->keylen);
|
||||
|
||||
lkcf->sess[index] = g_steal_pointer(&sess);
|
||||
*session_id = index;
|
||||
|
||||
return VIRTIO_CRYPTO_OK;
|
||||
}
|
||||
|
||||
static int cryptodev_lkcf_create_session(
|
||||
CryptoDevBackend *backend,
|
||||
CryptoDevBackendSessionInfo *sess_info,
|
||||
uint32_t queue_index,
|
||||
CryptoDevCompletionFunc cb,
|
||||
void *opaque)
|
||||
{
|
||||
CryptoDevBackendAsymSessionInfo *asym_sess_info;
|
||||
CryptoDevBackendLKCF *lkcf =
|
||||
CRYPTODEV_BACKEND_LKCF(backend);
|
||||
int ret;
|
||||
|
||||
switch (sess_info->op_code) {
|
||||
case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION:
|
||||
asym_sess_info = &sess_info->u.asym_sess_info;
|
||||
ret = cryptodev_lkcf_create_asym_session(
|
||||
lkcf, asym_sess_info, &sess_info->session_id);
|
||||
break;
|
||||
|
||||
default:
|
||||
ret = -VIRTIO_CRYPTO_NOTSUPP;
|
||||
error_report("Unsupported opcode: %" PRIu32 "",
|
||||
sess_info->op_code);
|
||||
break;
|
||||
}
|
||||
if (cb) {
|
||||
cb(opaque, ret);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cryptodev_lkcf_close_session(CryptoDevBackend *backend,
|
||||
uint64_t session_id,
|
||||
uint32_t queue_index,
|
||||
CryptoDevCompletionFunc cb,
|
||||
void *opaque)
|
||||
{
|
||||
CryptoDevBackendLKCF *lkcf = CRYPTODEV_BACKEND_LKCF(backend);
|
||||
CryptoDevBackendLKCFSession *session;
|
||||
|
||||
assert(session_id < MAX_SESSIONS && lkcf->sess[session_id]);
|
||||
session = lkcf->sess[session_id];
|
||||
lkcf->sess[session_id] = NULL;
|
||||
|
||||
g_free(session->key);
|
||||
g_free(session);
|
||||
|
||||
if (cb) {
|
||||
cb(opaque, VIRTIO_CRYPTO_OK);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cryptodev_lkcf_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_CLASS(oc);
|
||||
|
||||
bc->init = cryptodev_lkcf_init;
|
||||
bc->cleanup = cryptodev_lkcf_cleanup;
|
||||
bc->create_session = cryptodev_lkcf_create_session;
|
||||
bc->close_session = cryptodev_lkcf_close_session;
|
||||
bc->do_op = cryptodev_lkcf_operation;
|
||||
}
|
||||
|
||||
static const TypeInfo cryptodev_builtin_info = {
|
||||
.name = TYPE_CRYPTODEV_BACKEND_LKCF,
|
||||
.parent = TYPE_CRYPTODEV_BACKEND,
|
||||
.class_init = cryptodev_lkcf_class_init,
|
||||
.instance_size = sizeof(CryptoDevBackendLKCF),
|
||||
};
|
||||
|
||||
static void cryptodev_lkcf_register_types(void)
|
||||
{
|
||||
type_register_static(&cryptodev_builtin_info);
|
||||
}
|
||||
|
||||
type_init(cryptodev_lkcf_register_types);
|
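With this backend in place, a guest virtio-crypto device can offload RSA
operations to the host kernel keyring service; an illustrative invocation,
assuming the usual cryptodev wiring, is:

  -object cryptodev-backend-lkcf,id=cryptodev0 \
  -device virtio-crypto-pci,id=crypto0,cryptodev=cryptodev0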
@ -259,13 +259,18 @@ static int64_t cryptodev_vhost_user_sym_create_session(
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int64_t cryptodev_vhost_user_create_session(
|
||||
static int cryptodev_vhost_user_create_session(
|
||||
CryptoDevBackend *backend,
|
||||
CryptoDevBackendSessionInfo *sess_info,
|
||||
uint32_t queue_index, Error **errp)
|
||||
uint32_t queue_index,
|
||||
CryptoDevCompletionFunc cb,
|
||||
void *opaque)
|
||||
{
|
||||
uint32_t op_code = sess_info->op_code;
|
||||
CryptoDevBackendSymSessionInfo *sym_sess_info;
|
||||
int64_t ret;
|
||||
Error *local_error = NULL;
|
||||
int status;
|
||||
|
||||
switch (op_code) {
|
||||
case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
|
||||
@ -273,27 +278,42 @@ static int64_t cryptodev_vhost_user_create_session(
|
||||
case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
|
||||
case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
|
||||
sym_sess_info = &sess_info->u.sym_sess_info;
|
||||
return cryptodev_vhost_user_sym_create_session(backend, sym_sess_info,
|
||||
queue_index, errp);
|
||||
default:
|
||||
error_setg(errp, "Unsupported opcode :%" PRIu32 "",
|
||||
sess_info->op_code);
|
||||
return -1;
|
||||
ret = cryptodev_vhost_user_sym_create_session(backend, sym_sess_info,
|
||||
queue_index, &local_error);
|
||||
break;
|
||||
|
||||
default:
|
||||
error_setg(&local_error, "Unsupported opcode :%" PRIu32 "",
|
||||
sess_info->op_code);
|
||||
return -VIRTIO_CRYPTO_NOTSUPP;
|
||||
}
|
||||
|
||||
return -1;
|
||||
if (local_error) {
|
||||
error_report_err(local_error);
|
||||
}
|
||||
if (ret < 0) {
|
||||
status = -VIRTIO_CRYPTO_ERR;
|
||||
} else {
|
||||
sess_info->session_id = ret;
|
||||
status = VIRTIO_CRYPTO_OK;
|
||||
}
|
||||
if (cb) {
|
||||
cb(opaque, status);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cryptodev_vhost_user_close_session(
|
||||
CryptoDevBackend *backend,
|
||||
uint64_t session_id,
|
||||
uint32_t queue_index, Error **errp)
|
||||
uint32_t queue_index,
|
||||
CryptoDevCompletionFunc cb,
|
||||
void *opaque)
|
||||
{
|
||||
CryptoDevBackendClient *cc =
|
||||
backend->conf.peers.ccs[queue_index];
|
||||
CryptoDevBackendVhost *vhost_crypto;
|
||||
int ret;
|
||||
int ret = -1, status;
|
||||
|
||||
vhost_crypto = cryptodev_vhost_user_get_vhost(cc, backend, queue_index);
|
||||
if (vhost_crypto) {
|
||||
@ -301,12 +321,17 @@ static int cryptodev_vhost_user_close_session(
|
||||
ret = dev->vhost_ops->vhost_crypto_close_session(dev,
|
||||
session_id);
|
||||
if (ret < 0) {
|
||||
return -1;
|
||||
status = -VIRTIO_CRYPTO_ERR;
|
||||
} else {
|
||||
return 0;
|
||||
status = VIRTIO_CRYPTO_OK;
|
||||
}
|
||||
} else {
|
||||
status = -VIRTIO_CRYPTO_NOTSUPP;
|
||||
}
|
||||
return -1;
|
||||
if (cb) {
|
||||
cb(opaque, status);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cryptodev_vhost_user_cleanup(
|
||||
@ -339,7 +364,7 @@ static void cryptodev_vhost_user_set_chardev(Object *obj,
|
||||
CRYPTODEV_BACKEND_VHOST_USER(obj);
|
||||
|
||||
if (s->opened) {
|
||||
error_setg(errp, QERR_PERMISSION_DENIED);
|
||||
error_setg(errp, "Property 'chardev' can no longer be set");
|
||||
} else {
|
||||
g_free(s->chr_name);
|
||||
s->chr_name = g_strdup(value);
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include "qapi/error.h"
|
||||
#include "qapi/visitor.h"
|
||||
#include "qemu/config-file.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qom/object_interfaces.h"
|
||||
#include "hw/virtio/virtio-crypto.h"
|
||||
|
||||
@ -72,69 +73,72 @@ void cryptodev_backend_cleanup(
|
||||
}
|
||||
}
|
||||
|
||||
int64_t cryptodev_backend_create_session(
|
||||
int cryptodev_backend_create_session(
|
||||
CryptoDevBackend *backend,
|
||||
CryptoDevBackendSessionInfo *sess_info,
|
||||
uint32_t queue_index, Error **errp)
|
||||
uint32_t queue_index,
|
||||
CryptoDevCompletionFunc cb,
|
||||
void *opaque)
|
||||
{
|
||||
CryptoDevBackendClass *bc =
|
||||
CRYPTODEV_BACKEND_GET_CLASS(backend);
|
||||
|
||||
if (bc->create_session) {
|
||||
return bc->create_session(backend, sess_info, queue_index, errp);
|
||||
return bc->create_session(backend, sess_info, queue_index, cb, opaque);
|
||||
}
|
||||
|
||||
return -1;
|
||||
return -VIRTIO_CRYPTO_NOTSUPP;
|
||||
}
|
||||
|
||||
int cryptodev_backend_close_session(
|
||||
CryptoDevBackend *backend,
|
||||
uint64_t session_id,
|
||||
uint32_t queue_index, Error **errp)
|
||||
uint32_t queue_index,
|
||||
CryptoDevCompletionFunc cb,
|
||||
void *opaque)
|
||||
{
|
||||
CryptoDevBackendClass *bc =
|
||||
CRYPTODEV_BACKEND_GET_CLASS(backend);
|
||||
|
||||
if (bc->close_session) {
|
||||
return bc->close_session(backend, session_id, queue_index, errp);
|
||||
return bc->close_session(backend, session_id, queue_index, cb, opaque);
|
||||
}
|
||||
|
||||
return -1;
|
||||
return -VIRTIO_CRYPTO_NOTSUPP;
|
||||
}
|
||||
|
||||
static int cryptodev_backend_operation(
|
||||
CryptoDevBackend *backend,
|
||||
CryptoDevBackendOpInfo *op_info,
|
||||
uint32_t queue_index, Error **errp)
|
||||
uint32_t queue_index,
|
||||
CryptoDevCompletionFunc cb,
|
||||
void *opaque)
|
||||
{
|
||||
CryptoDevBackendClass *bc =
|
||||
CRYPTODEV_BACKEND_GET_CLASS(backend);
|
||||
|
||||
if (bc->do_op) {
|
||||
return bc->do_op(backend, op_info, queue_index, errp);
|
||||
return bc->do_op(backend, op_info, queue_index, cb, opaque);
|
||||
}
|
||||
|
||||
return -VIRTIO_CRYPTO_ERR;
|
||||
return -VIRTIO_CRYPTO_NOTSUPP;
|
||||
}
|
||||
|
||||
int cryptodev_backend_crypto_operation(
|
||||
CryptoDevBackend *backend,
|
||||
void *opaque,
|
||||
uint32_t queue_index, Error **errp)
|
||||
void *opaque1,
|
||||
uint32_t queue_index,
|
||||
CryptoDevCompletionFunc cb, void *opaque2)
|
||||
{
|
||||
VirtIOCryptoReq *req = opaque;
|
||||
VirtIOCryptoReq *req = opaque1;
|
||||
CryptoDevBackendOpInfo *op_info = &req->op_info;
|
||||
enum CryptoDevBackendAlgType algtype = req->flags;
|
||||
|
||||
if ((algtype != CRYPTODEV_BACKEND_ALG_SYM)
|
||||
&& (algtype != CRYPTODEV_BACKEND_ALG_ASYM)) {
|
||||
error_setg(errp, "Unsupported cryptodev alg type: %" PRIu32 "",
|
||||
algtype);
|
||||
|
||||
error_report("Unsupported cryptodev alg type: %" PRIu32 "", algtype);
|
||||
return -VIRTIO_CRYPTO_NOTSUPP;
|
||||
}
|
||||
|
||||
return cryptodev_backend_operation(backend, op_info, queue_index, errp);
|
||||
return cryptodev_backend_operation(backend, op_info, queue_index,
|
||||
cb, opaque2);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -232,7 +232,8 @@ static void host_memory_backend_set_prealloc(Object *obj, bool value,
|
||||
void *ptr = memory_region_get_ram_ptr(&backend->mr);
|
||||
uint64_t sz = memory_region_size(&backend->mr);
|
||||
|
||||
os_mem_prealloc(fd, ptr, sz, backend->prealloc_threads, &local_err);
|
||||
qemu_prealloc_mem(fd, ptr, sz, backend->prealloc_threads,
|
||||
backend->prealloc_context, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
@ -383,8 +384,9 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
|
||||
* specified NUMA policy in place.
|
||||
*/
|
||||
if (backend->prealloc) {
|
||||
os_mem_prealloc(memory_region_get_fd(&backend->mr), ptr, sz,
|
||||
backend->prealloc_threads, &local_err);
|
||||
qemu_prealloc_mem(memory_region_get_fd(&backend->mr), ptr, sz,
|
||||
backend->prealloc_threads,
|
||||
backend->prealloc_context, &local_err);
|
||||
if (local_err) {
|
||||
goto out;
|
||||
}
|
||||
@ -492,6 +494,11 @@ host_memory_backend_class_init(ObjectClass *oc, void *data)
|
||||
NULL, NULL);
|
||||
object_class_property_set_description(oc, "prealloc-threads",
|
||||
"Number of CPU threads to use for prealloc");
|
||||
object_class_property_add_link(oc, "prealloc-context",
|
||||
TYPE_THREAD_CONTEXT, offsetof(HostMemoryBackend, prealloc_context),
|
||||
object_property_allow_set_link, OBJ_PROP_LINK_STRONG);
|
||||
object_class_property_set_description(oc, "prealloc-context",
|
||||
"Context to use for creating CPU threads for preallocation");
|
||||
object_class_property_add(oc, "size", "int",
|
||||
host_memory_backend_get_size,
|
||||
host_memory_backend_set_size,
|
||||
|
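The new "prealloc-context" link property ties memory preallocation threads to a
thread-context object; an illustrative command line, assuming the
thread-context object type introduced alongside this change, is:

  -object thread-context,id=tc1,node-affinity=0 \
  -object memory-backend-ram,id=ram0,size=8G,prealloc=on,prealloc-threads=8,prealloc-context=tc1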
@ -12,6 +12,9 @@ softmmu_ss.add([files(
|
||||
softmmu_ss.add(when: 'CONFIG_POSIX', if_true: files('rng-random.c'))
|
||||
softmmu_ss.add(when: 'CONFIG_POSIX', if_true: files('hostmem-file.c'))
|
||||
softmmu_ss.add(when: 'CONFIG_LINUX', if_true: files('hostmem-memfd.c'))
|
||||
if keyutils.found()
|
||||
softmmu_ss.add(keyutils, files('cryptodev-lkcf.c'))
|
||||
endif
|
||||
if have_vhost_user
|
||||
softmmu_ss.add(when: 'CONFIG_VIRTIO', if_true: files('vhost-user.c'))
|
||||
endif
|
||||
|
@ -116,7 +116,7 @@ static void rng_egd_set_chardev(Object *obj, const char *value, Error **errp)
|
||||
RngEgd *s = RNG_EGD(b);
|
||||
|
||||
if (b->opened) {
|
||||
error_setg(errp, QERR_PERMISSION_DENIED);
|
||||
error_setg(errp, "Property 'chardev' can no longer be set");
|
||||
} else {
|
||||
g_free(s->chr_name);
|
||||
s->chr_name = g_strdup(value);
|
||||
|
@ -96,7 +96,7 @@ static void rng_random_set_filename(Object *obj, const char *filename,
|
||||
RngRandom *s = RNG_RANDOM(obj);
|
||||
|
||||
if (b->opened) {
|
||||
error_setg(errp, QERR_PERMISSION_DENIED);
|
||||
error_setg(errp, "Property 'filename' can no longer be set");
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -141,7 +141,7 @@ static void set_chardev(Object *obj, const char *value, Error **errp)
|
||||
Chardev *chr;
|
||||
|
||||
if (b->completed) {
|
||||
error_setg(errp, QERR_PERMISSION_DENIED);
|
||||
error_setg(errp, "Property 'chardev' can no longer be set");
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -309,7 +309,7 @@ static void coroutine_fn backup_pause(Job *job)
|
||||
}
|
||||
}
|
||||
|
||||
static void coroutine_fn backup_set_speed(BlockJob *job, int64_t speed)
|
||||
static void backup_set_speed(BlockJob *job, int64_t speed)
|
||||
{
|
||||
BackupBlockJob *s = container_of(job, BackupBlockJob, common);
|
||||
|
||||
|
@ -503,12 +503,9 @@ static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
}
|
||||
|
||||
/* Open the image file */
|
||||
bs->file = bdrv_open_child(qemu_opt_get(opts, "x-image"), options, "image",
|
||||
bs, &child_of_bds,
|
||||
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
|
||||
false, errp);
|
||||
if (!bs->file) {
|
||||
ret = -EINVAL;
|
||||
ret = bdrv_open_file_child(qemu_opt_get(opts, "x-image"), options, "image",
|
||||
bs, errp);
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -672,7 +669,7 @@ blkdebug_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
return bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
|
||||
}
|
||||
|
||||
static int blkdebug_co_flush(BlockDriverState *bs)
|
||||
static int coroutine_fn blkdebug_co_flush(BlockDriverState *bs)
|
||||
{
|
||||
int err = rule_check(bs, 0, 0, BLKDEBUG_IO_TYPE_FLUSH);
|
||||
|
||||
|
1047 block/blkio.c (new file; diff suppressed because it is too large)
@ -155,11 +155,8 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
}
|
||||
|
||||
/* Open the file */
|
||||
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
|
||||
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, false,
|
||||
errp);
|
||||
if (!bs->file) {
|
||||
ret = -EINVAL;
|
||||
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
@ -257,10 +254,6 @@ fail_log:
|
||||
s->log_file = NULL;
|
||||
}
|
||||
fail:
|
||||
if (ret < 0) {
|
||||
bdrv_unref_child(bs, bs->file);
|
||||
bs->file = NULL;
|
||||
}
|
||||
qemu_opts_del(opts);
|
||||
return ret;
|
||||
}
|
||||
|
@ -26,11 +26,8 @@ static int blkreplay_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
int ret;
|
||||
|
||||
/* Open the image file */
|
||||
bs->file = bdrv_open_child(NULL, options, "image", bs, &child_of_bds,
|
||||
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
|
||||
false, errp);
|
||||
if (!bs->file) {
|
||||
ret = -EINVAL;
|
||||
ret = bdrv_open_file_child(NULL, options, "image", bs, errp);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
|
@ -122,12 +122,9 @@ static int blkverify_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
}
|
||||
|
||||
/* Open the raw file */
|
||||
bs->file = bdrv_open_child(qemu_opt_get(opts, "x-raw"), options, "raw",
|
||||
bs, &child_of_bds,
|
||||
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
|
||||
false, errp);
|
||||
if (!bs->file) {
|
||||
ret = -EINVAL;
|
||||
ret = bdrv_open_file_child(qemu_opt_get(opts, "x-raw"), options, "raw",
|
||||
bs, errp);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
@ -235,8 +232,8 @@ blkverify_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
qemu_iovec_init(&raw_qiov, qiov->niov);
|
||||
qemu_iovec_clone(&raw_qiov, qiov, buf);
|
||||
|
||||
ret = blkverify_co_prwv(bs, &r, offset, bytes, qiov, &raw_qiov, flags,
|
||||
false);
|
||||
ret = blkverify_co_prwv(bs, &r, offset, bytes, qiov, &raw_qiov,
|
||||
flags & ~BDRV_REQ_REGISTERED_BUF, false);
|
||||
|
||||
cmp_offset = qemu_iovec_compare(qiov, &raw_qiov);
|
||||
if (cmp_offset != -1) {
|
||||
|
@ -134,10 +134,9 @@ static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter);
|
||||
static void blk_root_change_media(BdrvChild *child, bool load);
|
||||
static void blk_root_resize(BdrvChild *child);
|
||||
|
||||
static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
|
||||
GSList **ignore, Error **errp);
|
||||
static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
|
||||
GSList **ignore);
|
||||
static bool blk_root_change_aio_ctx(BdrvChild *child, AioContext *ctx,
|
||||
GHashTable *visited, Transaction *tran,
|
||||
Error **errp);
|
||||
|
||||
static char *blk_root_get_parent_desc(BdrvChild *child)
|
||||
{
|
||||
@ -334,8 +333,7 @@ static const BdrvChildClass child_root = {
|
||||
.attach = blk_root_attach,
|
||||
.detach = blk_root_detach,
|
||||
|
||||
.can_set_aio_ctx = blk_root_can_set_aio_ctx,
|
||||
.set_aio_ctx = blk_root_set_aio_ctx,
|
||||
.change_aio_ctx = blk_root_change_aio_ctx,
|
||||
|
||||
.get_parent_aio_context = blk_root_get_parent_aio_context,
|
||||
};
|
||||
@ -1946,7 +1944,7 @@ bool blk_enable_write_cache(BlockBackend *blk)
|
||||
|
||||
void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
IO_CODE();
|
||||
blk->enable_write_cache = wce;
|
||||
}
|
||||
|
||||
@ -2149,8 +2147,11 @@ static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
|
||||
bdrv_ref(bs);
|
||||
|
||||
if (update_root_node) {
|
||||
ret = bdrv_child_try_set_aio_context(bs, new_context, blk->root,
|
||||
errp);
|
||||
/*
|
||||
* update_root_node MUST be false for blk_root_set_aio_ctx_commit(),
|
||||
* as we are already in the commit function of a transaction.
|
||||
*/
|
||||
ret = bdrv_try_change_aio_context(bs, new_context, blk->root, errp);
|
||||
if (ret < 0) {
|
||||
bdrv_unref(bs);
|
||||
return ret;
|
||||
@ -2177,31 +2178,52 @@ int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
|
||||
return blk_do_set_aio_context(blk, new_context, true, errp);
|
||||
}
|
||||
|
||||
static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
|
||||
GSList **ignore, Error **errp)
|
||||
typedef struct BdrvStateBlkRootContext {
|
||||
AioContext *new_ctx;
|
||||
BlockBackend *blk;
|
||||
} BdrvStateBlkRootContext;
|
||||
|
||||
static void blk_root_set_aio_ctx_commit(void *opaque)
|
||||
{
|
||||
BlockBackend *blk = child->opaque;
|
||||
BdrvStateBlkRootContext *s = opaque;
|
||||
BlockBackend *blk = s->blk;
|
||||
|
||||
if (blk->allow_aio_context_change) {
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Only manually created BlockBackends that are not attached to anything
|
||||
* can change their AioContext without updating their user. */
|
||||
if (!blk->name || blk->dev) {
|
||||
/* TODO Add BB name/QOM path */
|
||||
error_setg(errp, "Cannot change iothread of active block backend");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
blk_do_set_aio_context(blk, s->new_ctx, false, &error_abort);
|
||||
}
|
||||
|
||||
static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
|
||||
GSList **ignore)
|
||||
static TransactionActionDrv set_blk_root_context = {
|
||||
.commit = blk_root_set_aio_ctx_commit,
|
||||
.clean = g_free,
|
||||
};
|
||||
|
||||
static bool blk_root_change_aio_ctx(BdrvChild *child, AioContext *ctx,
|
||||
GHashTable *visited, Transaction *tran,
|
||||
Error **errp)
|
||||
{
|
||||
BlockBackend *blk = child->opaque;
|
||||
blk_do_set_aio_context(blk, ctx, false, &error_abort);
|
||||
BdrvStateBlkRootContext *s;
|
||||
|
||||
if (!blk->allow_aio_context_change) {
|
||||
/*
|
||||
* Manually created BlockBackends (those with a name) that are not
|
||||
* attached to anything can change their AioContext without updating
|
||||
* their user; return an error for others.
|
||||
*/
|
||||
if (!blk->name || blk->dev) {
|
||||
/* TODO Add BB name/QOM path */
|
||||
error_setg(errp, "Cannot change iothread of active block backend");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
s = g_new(BdrvStateBlkRootContext, 1);
|
||||
*s = (BdrvStateBlkRootContext) {
|
||||
.new_ctx = ctx,
|
||||
.blk = blk,
|
||||
};
|
||||
|
||||
tran_add(tran, &set_blk_root_context, s);
|
||||
return true;
|
||||
}
|
||||
|
||||
void blk_add_aio_context_notifier(BlockBackend *blk,
|
||||
@ -2545,16 +2567,16 @@ static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)
|
||||
}
|
||||
}
|
||||
|
||||
void blk_register_buf(BlockBackend *blk, void *host, size_t size)
|
||||
bool blk_register_buf(BlockBackend *blk, void *host, size_t size, Error **errp)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
bdrv_register_buf(blk_bs(blk), host, size);
|
||||
return bdrv_register_buf(blk_bs(blk), host, size, errp);
|
||||
}
|
||||
|
||||
void blk_unregister_buf(BlockBackend *blk, void *host)
|
||||
void blk_unregister_buf(BlockBackend *blk, void *host, size_t size)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
bdrv_unregister_buf(blk_bs(blk), host);
|
||||
bdrv_unregister_buf(blk_bs(blk), host, size);
|
||||
}
|
||||
|
||||
int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
|
||||
|
58 block/block-ram-registrar.c (new file)
@ -0,0 +1,58 @@
|
||||
/*
 * BlockBackend RAM Registrar
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "sysemu/block-ram-registrar.h"
#include "qapi/error.h"

static void ram_block_added(RAMBlockNotifier *n, void *host, size_t size,
                            size_t max_size)
{
    BlockRAMRegistrar *r = container_of(n, BlockRAMRegistrar, notifier);
    Error *err = NULL;

    if (!r->ok) {
        return; /* don't try again if we've already failed */
    }

    if (!blk_register_buf(r->blk, host, max_size, &err)) {
        error_report_err(err);
        ram_block_notifier_remove(&r->notifier);
        r->ok = false;
    }
}

static void ram_block_removed(RAMBlockNotifier *n, void *host, size_t size,
                              size_t max_size)
{
    BlockRAMRegistrar *r = container_of(n, BlockRAMRegistrar, notifier);
    blk_unregister_buf(r->blk, host, max_size);
}

void blk_ram_registrar_init(BlockRAMRegistrar *r, BlockBackend *blk)
{
    r->blk = blk;
    r->notifier = (RAMBlockNotifier){
        .ram_block_added = ram_block_added,
        .ram_block_removed = ram_block_removed,

        /*
         * .ram_block_resized() is not necessary because we use the max_size
         * value that does not change across resize.
         */
    };
    r->ok = true;

    ram_block_notifier_add(&r->notifier);
}

void blk_ram_registrar_destroy(BlockRAMRegistrar *r)
{
    if (r->ok) {
        ram_block_notifier_remove(&r->notifier);
    }
}
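A device that wants its guest RAM registered with a BlockBackend (for example
so the new blkio driver can do zero-copy I/O) embeds the registrar and drives
it from setup and teardown; a minimal sketch with a hypothetical device state
struct, assuming only the functions shown above:

    typedef struct MyDeviceState {
        BlockBackend *blk;
        BlockRAMRegistrar blk_ram_registrar;
    } MyDeviceState;

    static void my_device_start(MyDeviceState *s)
    {
        /* Registers existing and future RAM blocks with s->blk */
        blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
    }

    static void my_device_stop(MyDeviceState *s)
    {
        /* Safe to call even if registration failed earlier (tracked by r->ok) */
        blk_ram_registrar_destroy(&s->blk_ram_registrar);
    }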
@ -110,10 +110,9 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
return ret;
|
||||
}
|
||||
|
||||
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
|
||||
BDRV_CHILD_IMAGE, false, errp);
|
||||
if (!bs->file) {
|
||||
return -EINVAL;
|
||||
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = bdrv_pread(bs->file, 0, sizeof(bochs), &bochs, 0);
|
||||
|
@ -71,10 +71,9 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
return ret;
|
||||
}
|
||||
|
||||
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
|
||||
BDRV_CHILD_IMAGE, false, errp);
|
||||
if (!bs->file) {
|
||||
return -EINVAL;
|
||||
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* read header */
|
||||
|
@ -135,7 +135,7 @@ static int coroutine_fn commit_run(Job *job, Error **errp)
|
||||
}
|
||||
|
||||
if (base_len < len) {
|
||||
ret = blk_truncate(s->base, len, false, PREALLOC_MODE_OFF, 0, NULL);
|
||||
ret = blk_co_truncate(s->base, len, false, PREALLOC_MODE_OFF, 0, NULL);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@ -238,6 +238,7 @@ static BlockDriver bdrv_commit_top = {
|
||||
.bdrv_child_perm = bdrv_commit_top_child_perm,
|
||||
|
||||
.is_filter = true,
|
||||
.filtered_child_is_backing = true,
|
||||
};
|
||||
|
||||
void commit_start(const char *job_id, BlockDriverState *bs,
|
||||
|
@ -412,6 +412,7 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
int64_t cluster_size;
|
||||
g_autoptr(BlockdevOptions) full_opts = NULL;
|
||||
BlockdevOptionsCbw *opts;
|
||||
int ret;
|
||||
|
||||
full_opts = cbw_parse_options(options, errp);
|
||||
if (!full_opts) {
|
||||
@ -420,11 +421,9 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
assert(full_opts->driver == BLOCKDEV_DRIVER_COPY_BEFORE_WRITE);
|
||||
opts = &full_opts->u.copy_before_write;
|
||||
|
||||
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
|
||||
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
|
||||
false, errp);
|
||||
if (!bs->file) {
|
||||
return -EINVAL;
|
||||
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
s->target = bdrv_open_child(NULL, options, "target", bs, &child_of_bds,
|
||||
|
@ -41,12 +41,11 @@ static int cor_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
BDRVStateCOR *state = bs->opaque;
|
||||
/* Find a bottom node name, if any */
|
||||
const char *bottom_node = qdict_get_try_str(options, "bottom");
|
||||
int ret;
|
||||
|
||||
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
|
||||
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
|
||||
false, errp);
|
||||
if (!bs->file) {
|
||||
return -EINVAL;
|
||||
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
bs->supported_read_flags = BDRV_REQ_PREFETCH;
|
||||
|
@ -261,15 +261,14 @@ static int block_crypto_open_generic(QCryptoBlockFormat format,
|
||||
{
|
||||
BlockCrypto *crypto = bs->opaque;
|
||||
QemuOpts *opts = NULL;
|
||||
int ret = -EINVAL;
|
||||
int ret;
|
||||
QCryptoBlockOpenOptions *open_opts = NULL;
|
||||
unsigned int cflags = 0;
|
||||
QDict *cryptoopts = NULL;
|
||||
|
||||
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
|
||||
BDRV_CHILD_IMAGE, false, errp);
|
||||
if (!bs->file) {
|
||||
return -EINVAL;
|
||||
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
bs->supported_write_flags = BDRV_REQ_FUA &
|
||||
@ -277,6 +276,7 @@ static int block_crypto_open_generic(QCryptoBlockFormat format,
|
||||
|
||||
opts = qemu_opts_create(opts_spec, NULL, 0, &error_abort);
|
||||
if (!qemu_opts_absorb_qdict(opts, options, errp)) {
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
@ -285,6 +285,7 @@ static int block_crypto_open_generic(QCryptoBlockFormat format,
|
||||
|
||||
open_opts = block_crypto_open_opts_init(cryptoopts, errp);
|
||||
if (!open_opts) {
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
@ -410,7 +411,6 @@ block_crypto_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
uint64_t sector_size = qcrypto_block_get_sector_size(crypto->block);
|
||||
uint64_t payload_offset = qcrypto_block_get_payload_offset(crypto->block);
|
||||
|
||||
assert(!flags);
|
||||
assert(payload_offset < INT64_MAX);
|
||||
assert(QEMU_IS_ALIGNED(offset, sector_size));
|
||||
assert(QEMU_IS_ALIGNED(bytes, sector_size));
|
||||
@ -473,7 +473,8 @@ block_crypto_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
uint64_t sector_size = qcrypto_block_get_sector_size(crypto->block);
|
||||
uint64_t payload_offset = qcrypto_block_get_payload_offset(crypto->block);
|
||||
|
||||
assert(!(flags & ~BDRV_REQ_FUA));
|
||||
flags &= ~BDRV_REQ_REGISTERED_BUF;
|
||||
|
||||
assert(payload_offset < INT64_MAX);
|
||||
assert(QEMU_IS_ALIGNED(offset, sector_size));
|
||||
assert(QEMU_IS_ALIGNED(bytes, sector_size));
|
||||
|
38
block/dmg.c
38
block/dmg.c
@ -254,6 +254,25 @@ static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
|
||||
for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
|
||||
s->types[i] = buff_read_uint32(buffer, offset);
|
||||
if (!dmg_is_known_block_type(s->types[i])) {
|
||||
switch (s->types[i]) {
|
||||
case UDBZ:
|
||||
warn_report_once("dmg-bzip2 module is missing, accessing bzip2 "
|
||||
"compressed blocks will result in I/O errors");
|
||||
break;
|
||||
case ULFO:
|
||||
warn_report_once("dmg-lzfse module is missing, accessing lzfse "
|
||||
"compressed blocks will result in I/O errors");
|
||||
break;
|
||||
case UDCM:
|
||||
case UDLE:
|
||||
/* Comments and last entry can be ignored without problems */
|
||||
break;
|
||||
default:
|
||||
warn_report_once("Image contains chunks of unknown type %x, "
|
||||
"accessing them will result in I/O errors",
|
||||
s->types[i]);
|
||||
break;
|
||||
}
|
||||
chunk_count--;
|
||||
i--;
|
||||
offset += 40;
|
||||
@ -440,14 +459,21 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
return ret;
|
||||
}
|
||||
|
||||
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
|
||||
BDRV_CHILD_IMAGE, false, errp);
|
||||
if (!bs->file) {
|
||||
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
/*
|
||||
* NB: if uncompress submodules are absent,
|
||||
* ie block_module_load return value == 0, the function pointers
|
||||
* dmg_uncompress_bz2 and dmg_uncompress_lzfse will be NULL.
|
||||
*/
|
||||
if (block_module_load("dmg-bz2", errp) < 0) {
|
||||
return -EINVAL;
|
||||
}
|
||||
if (block_module_load("dmg-lzfse", errp) < 0) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
block_module_load_one("dmg-bz2");
|
||||
block_module_load_one("dmg-lzfse");
|
||||
|
||||
s->n_chunks = 0;
|
||||
s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
|
||||
|
@ -129,7 +129,7 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
|
||||
|
||||
/* Ignore errors with fixed-iothread=false */
|
||||
set_context_errp = fixed_iothread ? errp : NULL;
|
||||
ret = bdrv_try_set_aio_context(bs, new_ctx, set_context_errp);
|
||||
ret = bdrv_try_change_aio_context(bs, new_ctx, NULL, set_context_errp);
|
||||
if (ret == 0) {
|
||||
aio_context_release(ctx);
|
||||
aio_context_acquire(new_ctx);
|
||||
|
@ -2133,7 +2133,6 @@ static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset,
|
||||
int64_t bytes, QEMUIOVector *qiov,
|
||||
BdrvRequestFlags flags)
|
||||
{
|
||||
assert(flags == 0);
|
||||
return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_WRITE);
|
||||
}
|
||||
|
||||
|
@ -30,11 +30,9 @@
|
||||
static int compress_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
Error **errp)
|
||||
{
|
||||
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
|
||||
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
|
||||
false, errp);
|
||||
if (!bs->file) {
|
||||
return -EINVAL;
|
||||
int ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!bs->file->bs->drv || !block_driver_can_compress(bs->file->bs->drv)) {
|
||||
|
@ -1236,7 +1236,6 @@ static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
|
||||
QEMUIOVector *qiov,
|
||||
int flags)
|
||||
{
|
||||
assert(!flags);
|
||||
return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
|
||||
}
|
||||
|
||||
|
109 block/io.c
@ -1130,8 +1130,7 @@ static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
|
||||
int ret;
|
||||
|
||||
bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
|
||||
assert(!(flags & ~BDRV_REQ_MASK));
|
||||
assert(!(flags & BDRV_REQ_NO_FALLBACK));
|
||||
assert(!(flags & ~bs->supported_read_flags));
|
||||
|
||||
if (!drv) {
|
||||
return -ENOMEDIUM;
|
||||
@ -1195,23 +1194,29 @@ static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
|
||||
BdrvRequestFlags flags)
|
||||
{
|
||||
BlockDriver *drv = bs->drv;
|
||||
bool emulate_fua = false;
|
||||
int64_t sector_num;
|
||||
unsigned int nb_sectors;
|
||||
QEMUIOVector local_qiov;
|
||||
int ret;
|
||||
|
||||
bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
|
||||
assert(!(flags & ~BDRV_REQ_MASK));
|
||||
assert(!(flags & BDRV_REQ_NO_FALLBACK));
|
||||
|
||||
if (!drv) {
|
||||
return -ENOMEDIUM;
|
||||
}
|
||||
|
||||
if ((flags & BDRV_REQ_FUA) &&
|
||||
(~bs->supported_write_flags & BDRV_REQ_FUA)) {
|
||||
flags &= ~BDRV_REQ_FUA;
|
||||
emulate_fua = true;
|
||||
}
|
||||
|
||||
flags &= bs->supported_write_flags;
|
||||
|
||||
if (drv->bdrv_co_pwritev_part) {
|
||||
ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
|
||||
flags & bs->supported_write_flags);
|
||||
flags &= ~bs->supported_write_flags;
|
||||
flags);
|
||||
goto emulate_flags;
|
||||
}
|
||||
|
||||
@ -1221,9 +1226,7 @@ static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
|
||||
}
|
||||
|
||||
if (drv->bdrv_co_pwritev) {
|
||||
ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
|
||||
flags & bs->supported_write_flags);
|
||||
flags &= ~bs->supported_write_flags;
|
||||
ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
|
||||
goto emulate_flags;
|
||||
}
|
||||
|
||||
@ -1233,10 +1236,8 @@ static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
|
||||
.coroutine = qemu_coroutine_self(),
|
||||
};
|
||||
|
||||
acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
|
||||
flags & bs->supported_write_flags,
|
||||
acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
|
||||
bdrv_co_io_em_complete, &co);
|
||||
flags &= ~bs->supported_write_flags;
|
||||
if (acb == NULL) {
|
||||
ret = -EIO;
|
||||
} else {
|
||||
@ -1254,12 +1255,10 @@ static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
|
||||
assert(bytes <= BDRV_REQUEST_MAX_BYTES);
|
||||
|
||||
assert(drv->bdrv_co_writev);
|
||||
ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
|
||||
flags & bs->supported_write_flags);
|
||||
flags &= ~bs->supported_write_flags;
|
||||
ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);
|
||||
|
||||
emulate_flags:
|
||||
if (ret == 0 && (flags & BDRV_REQ_FUA)) {
|
||||
if (ret == 0 && emulate_fua) {
|
||||
ret = bdrv_co_flush(bs);
|
||||
}
|
||||
|
||||
@ -1487,11 +1486,14 @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
|
||||
max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
|
||||
align);
|
||||
|
||||
/* TODO: We would need a per-BDS .supported_read_flags and
|
||||
/*
|
||||
* TODO: We would need a per-BDS .supported_read_flags and
|
||||
* potential fallback support, if we ever implement any read flags
|
||||
* to pass through to drivers. For now, there aren't any
|
||||
* passthrough flags. */
|
||||
assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH)));
|
||||
* passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
|
||||
*/
|
||||
assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
|
||||
BDRV_REQ_REGISTERED_BUF)));
|
||||
|
||||
/* Handle Copy on Read and associated serialisation */
|
||||
if (flags & BDRV_REQ_COPY_ON_READ) {
|
||||
@ -1532,7 +1534,7 @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
|
||||
goto out;
|
||||
}
|
||||
|
||||
assert(!(flags & ~bs->supported_read_flags));
|
||||
assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));
|
||||
|
||||
max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
|
||||
if (bytes <= max_bytes && bytes <= max_transfer) {
|
||||
@ -1721,7 +1723,8 @@ static void bdrv_padding_destroy(BdrvRequestPadding *pad)
|
||||
static int bdrv_pad_request(BlockDriverState *bs,
|
||||
QEMUIOVector **qiov, size_t *qiov_offset,
|
||||
int64_t *offset, int64_t *bytes,
|
||||
BdrvRequestPadding *pad, bool *padded)
|
||||
BdrvRequestPadding *pad, bool *padded,
|
||||
BdrvRequestFlags *flags)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@ -1749,6 +1752,10 @@ static int bdrv_pad_request(BlockDriverState *bs,
|
||||
if (padded) {
|
||||
*padded = true;
|
||||
}
|
||||
if (flags) {
|
||||
/* Can't use optimization hint with bounce buffer */
|
||||
*flags &= ~BDRV_REQ_REGISTERED_BUF;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1803,7 +1810,7 @@ int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
|
||||
}
|
||||
|
||||
ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
|
||||
NULL);
|
||||
NULL, &flags);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
@ -1848,6 +1855,11 @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
/* By definition there is no user buffer so this flag doesn't make sense */
|
||||
if (flags & BDRV_REQ_REGISTERED_BUF) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Invalidate the cached block-status data range if this write overlaps */
|
||||
bdrv_bsc_invalidate_range(bs, offset, bytes);
|
||||
|
||||
@ -2133,6 +2145,9 @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
|
||||
bool padding;
|
||||
BdrvRequestPadding pad;
|
||||
|
||||
/* This flag doesn't make sense for padding or zero writes */
|
||||
flags &= ~BDRV_REQ_REGISTERED_BUF;
|
||||
|
||||
padding = bdrv_init_padding(bs, offset, bytes, &pad);
|
||||
if (padding) {
|
||||
assert(!(flags & BDRV_REQ_NO_WAIT));
|
||||
@ -2250,7 +2265,7 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
|
||||
* alignment only if there is no ZERO flag.
|
||||
*/
|
||||
ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
|
||||
&padded);
|
||||
&padded, &flags);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
@ -2729,8 +2744,8 @@ int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
|
||||
return 1;
|
||||
}
|
||||
|
||||
ret = bdrv_common_block_status_above(bs, NULL, false, false, offset,
|
||||
bytes, &pnum, NULL, NULL, NULL);
|
||||
ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset,
|
||||
bytes, &pnum, NULL, NULL, NULL);
|
||||
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
@ -2739,8 +2754,8 @@ int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
|
||||
return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
|
||||
}
|
||||
|
||||
int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
|
||||
int64_t bytes, int64_t *pnum)
|
||||
int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
int64_t *pnum)
|
||||
{
|
||||
int ret;
|
||||
int64_t dummy;
|
||||
@ -3262,29 +3277,57 @@ void bdrv_io_unplug(BlockDriverState *bs)
|
||||
}
|
||||
}
|
||||
|
||||
void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
|
||||
/* Helper that undoes bdrv_register_buf() when it fails partway through */
|
||||
static void bdrv_register_buf_rollback(BlockDriverState *bs,
|
||||
void *host,
|
||||
size_t size,
|
||||
BdrvChild *final_child)
|
||||
{
|
||||
BdrvChild *child;
|
||||
|
||||
QLIST_FOREACH(child, &bs->children, next) {
|
||||
if (child == final_child) {
|
||||
break;
|
||||
}
|
||||
|
||||
bdrv_unregister_buf(child->bs, host, size);
|
||||
}
|
||||
|
||||
if (bs->drv && bs->drv->bdrv_unregister_buf) {
|
||||
bs->drv->bdrv_unregister_buf(bs, host, size);
|
||||
}
|
||||
}
|
||||
|
||||
bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
|
||||
Error **errp)
|
||||
{
|
||||
BdrvChild *child;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
if (bs->drv && bs->drv->bdrv_register_buf) {
|
||||
bs->drv->bdrv_register_buf(bs, host, size);
|
||||
if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
QLIST_FOREACH(child, &bs->children, next) {
|
||||
bdrv_register_buf(child->bs, host, size);
|
||||
if (!bdrv_register_buf(child->bs, host, size, errp)) {
|
||||
bdrv_register_buf_rollback(bs, host, size, child);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void bdrv_unregister_buf(BlockDriverState *bs, void *host)
|
||||
void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size)
|
||||
{
|
||||
BdrvChild *child;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
if (bs->drv && bs->drv->bdrv_unregister_buf) {
|
||||
bs->drv->bdrv_unregister_buf(bs, host);
|
||||
bs->drv->bdrv_unregister_buf(bs, host, size);
|
||||
}
|
||||
QLIST_FOREACH(child, &bs->children, next) {
|
||||
bdrv_unregister_buf(child->bs, host);
|
||||
bdrv_unregister_buf(child->bs, host, size);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include "qemu/osdep.h"
|
||||
#include <liburing.h>
|
||||
#include "block/aio.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/queue.h"
|
||||
#include "block/block.h"
|
||||
#include "block/raw-aio.h"
|
||||
@ -19,7 +18,6 @@
|
||||
#include "qapi/error.h"
|
||||
#include "trace.h"
|
||||
|
||||
|
||||
/* io_uring ring size */
|
||||
#define MAX_ENTRIES 128
|
||||
|
||||
@ -432,17 +430,8 @@ LuringState *luring_init(Error **errp)
|
||||
}
|
||||
|
||||
ioq_init(&s->io_q);
|
||||
#ifdef CONFIG_LIBURING_REGISTER_RING_FD
|
||||
if (io_uring_register_ring_fd(&s->ring) < 0) {
|
||||
/*
|
||||
* Only warn about this error: we will fallback to the non-optimized
|
||||
* io_uring operations.
|
||||
*/
|
||||
warn_report("failed to register linux io_uring ring file descriptor");
|
||||
}
|
||||
#endif
|
||||
|
||||
return s;
|
||||
|
||||
}
|
||||
|
||||
void luring_cleanup(LuringState *s)
|
||||
|
@ -46,6 +46,7 @@ block_ss.add(files(
|
||||
), zstd, zlib, gnutls)
|
||||
|
||||
softmmu_ss.add(when: 'CONFIG_TCG', if_true: files('blkreplay.c'))
|
||||
softmmu_ss.add(files('block-ram-registrar.c'))
|
||||
|
||||
if get_option('qcow1').allowed()
|
||||
block_ss.add(files('qcow.c'))
|
||||
@ -92,6 +93,7 @@ block_modules = {}
|
||||
|
||||
modsrc = []
|
||||
foreach m : [
|
||||
[blkio, 'blkio', files('blkio.c')],
|
||||
[curl, 'curl', files('curl.c')],
|
||||
[glusterfs, 'gluster', files('gluster.c')],
|
||||
[libiscsi, 'iscsi', [files('iscsi.c'), libm]],
|
||||
|
@ -922,8 +922,8 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
* active layer. */
if (s->base == blk_bs(s->target)) {
if (s->bdev_length > target_length) {
ret = blk_truncate(s->target, s->bdev_length, false,
PREALLOC_MODE_OFF, 0, NULL);
ret = blk_co_truncate(s->target, s->bdev_length, false,
PREALLOC_MODE_OFF, 0, NULL);
if (ret < 0) {
goto immediate_exit;
}
@ -1486,6 +1486,8 @@ static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
qemu_iovec_init(&bounce_qiov, 1);
qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
qiov = &bounce_qiov;

flags &= ~BDRV_REQ_REGISTERED_BUF;
}

ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
@ -1587,6 +1589,7 @@ static BlockDriver bdrv_mirror_top = {
.bdrv_child_perm = bdrv_mirror_top_child_perm,

.is_filter = true,
.filtered_child_is_backing = true,
};

static BlockJob *mirror_start_job(
@ -489,7 +489,7 @@ void hmp_nbd_server_stop(Monitor *mon, const QDict *qdict)
hmp_handle_error(mon, err);
}

void hmp_block_resize(Monitor *mon, const QDict *qdict)
void coroutine_fn hmp_block_resize(Monitor *mon, const QDict *qdict)
{
const char *device = qdict_get_str(qdict, "device");
int64_t size = qdict_get_int(qdict, "size");
@ -1222,7 +1222,6 @@ static int coroutine_fn nbd_client_co_preadv(BlockDriverState *bs, int64_t offse
};

assert(bytes <= NBD_MAX_BUFFER_SIZE);
assert(!flags);

if (!bytes) {
return 0;
@ -418,7 +418,11 @@ static int64_t nfs_client_open(NFSClient *client, BlockdevOptionsNfs *opts,
int flags, int open_flags, Error **errp)
{
int64_t ret = -EINVAL;
#ifdef _WIN32
struct __stat64 st;
#else
struct stat st;
#endif
char *file = NULL, *strp = NULL;

qemu_mutex_init(&client->mutex);
@ -781,7 +785,11 @@ static int nfs_reopen_prepare(BDRVReopenState *state,
BlockReopenQueue *queue, Error **errp)
{
NFSClient *client = state->bs->opaque;
#ifdef _WIN32
struct __stat64 st;
#else
struct stat st;
#endif
int ret = 0;

if (state->flags & BDRV_O_RDWR && bdrv_is_read_only(state->bs)) {
20
block/nvme.c
@ -1587,22 +1587,22 @@ static void nvme_aio_unplug(BlockDriverState *bs)
}
}

static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
static bool nvme_register_buf(BlockDriverState *bs, void *host, size_t size,
Error **errp)
{
int ret;
Error *local_err = NULL;
BDRVNVMeState *s = bs->opaque;

ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, &local_err);
if (ret) {
/* FIXME: we may run out of IOVA addresses after repeated
* bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
* doesn't reclaim addresses for fixed mappings. */
error_reportf_err(local_err, "nvme_register_buf failed: ");
}
/*
* FIXME: we may run out of IOVA addresses after repeated
* bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
* doesn't reclaim addresses for fixed mappings.
*/
ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, errp);
return ret == 0;
}

static void nvme_unregister_buf(BlockDriverState *bs, void *host)
static void nvme_unregister_buf(BlockDriverState *bs, void *host, size_t size)
{
BDRVNVMeState *s = bs->opaque;
@ -205,18 +205,18 @@ static coroutine_fn int64_t allocate_clusters(BlockDriverState *bs,
* force the safer-but-slower fallocate.
*/
if (s->prealloc_mode == PRL_PREALLOC_MODE_TRUNCATE) {
ret = bdrv_truncate(bs->file,
(s->data_end + space) << BDRV_SECTOR_BITS,
false, PREALLOC_MODE_OFF, BDRV_REQ_ZERO_WRITE,
NULL);
ret = bdrv_co_truncate(bs->file,
(s->data_end + space) << BDRV_SECTOR_BITS,
false, PREALLOC_MODE_OFF,
BDRV_REQ_ZERO_WRITE, NULL);
if (ret == -ENOTSUP) {
s->prealloc_mode = PRL_PREALLOC_MODE_FALLOCATE;
}
}
if (s->prealloc_mode == PRL_PREALLOC_MODE_FALLOCATE) {
ret = bdrv_pwrite_zeroes(bs->file,
s->data_end << BDRV_SECTOR_BITS,
space << BDRV_SECTOR_BITS, 0);
ret = bdrv_co_pwrite_zeroes(bs->file,
s->data_end << BDRV_SECTOR_BITS,
space << BDRV_SECTOR_BITS, 0);
}
if (ret < 0) {
return ret;
@ -278,8 +278,8 @@ static coroutine_fn int parallels_co_flush_to_os(BlockDriverState *bs)
if (off + to_write > s->header_size) {
to_write = s->header_size - off;
}
ret = bdrv_pwrite(bs->file, off, to_write, (uint8_t *)s->header + off,
0);
ret = bdrv_co_pwrite(bs->file, off, to_write,
(uint8_t *)s->header + off, 0);
if (ret < 0) {
qemu_co_mutex_unlock(&s->lock);
return ret;
@ -329,7 +329,6 @@ static coroutine_fn int parallels_co_writev(BlockDriverState *bs,
QEMUIOVector hd_qiov;
int ret = 0;

assert(!flags);
qemu_iovec_init(&hd_qiov, qiov->niov);

while (nb_sectors > 0) {
@ -504,8 +503,8 @@ static int coroutine_fn parallels_co_check(BlockDriverState *bs,
* In order to really repair the image, we must shrink it.
* That means we have to pass exact=true.
*/
ret = bdrv_truncate(bs->file, res->image_end_offset, true,
PREALLOC_MODE_OFF, 0, &local_err);
ret = bdrv_co_truncate(bs->file, res->image_end_offset, true,
PREALLOC_MODE_OFF, 0, &local_err);
if (ret < 0) {
error_report_err(local_err);
res->check_errors++;
@ -600,12 +599,12 @@ static int coroutine_fn parallels_co_create(BlockdevCreateOptions* opts,
memset(tmp, 0, sizeof(tmp));
memcpy(tmp, &header, sizeof(header));

ret = blk_pwrite(blk, 0, BDRV_SECTOR_SIZE, tmp, 0);
ret = blk_co_pwrite(blk, 0, BDRV_SECTOR_SIZE, tmp, 0);
if (ret < 0) {
goto exit;
}
ret = blk_pwrite_zeroes(blk, BDRV_SECTOR_SIZE,
(bat_sectors - 1) << BDRV_SECTOR_BITS, 0);
ret = blk_co_pwrite_zeroes(blk, BDRV_SECTOR_SIZE,
(bat_sectors - 1) << BDRV_SECTOR_BITS, 0);
if (ret < 0) {
goto exit;
}
@ -737,10 +736,9 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
Error *local_err = NULL;
char *buf;

bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
BDRV_CHILD_IMAGE, false, errp);
if (!bs->file) {
return -EINVAL;
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
if (ret < 0) {
return ret;
}

ret = bdrv_pread(bs->file, 0, sizeof(ph), &ph, 0);
@ -134,6 +134,7 @@ static int preallocate_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVPreallocateState *s = bs->opaque;
int ret;

/*
* s->data_end and friends should be initialized on permission update.
@ -141,11 +142,9 @@ static int preallocate_open(BlockDriverState *bs, QDict *options, int flags,
*/
s->file_end = s->zero_start = s->data_end = -EINVAL;

bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
false, errp);
if (!bs->file) {
return -EINVAL;
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
if (ret < 0) {
return ret;
}

if (!preallocate_absorb_opts(&s->opts, options, bs->file->bs, errp)) {
68
block/qcow.c
@ -92,7 +92,8 @@ typedef struct BDRVQcowState {

static QemuOptsList qcow_create_opts;

static int decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset);
static int coroutine_fn decompress_cluster(BlockDriverState *bs,
uint64_t cluster_offset);

static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename)
{
@ -121,10 +122,8 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
qdict_extract_subqdict(options, &encryptopts, "encrypt.");
encryptfmt = qdict_get_try_str(encryptopts, "format");

bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
BDRV_CHILD_IMAGE, false, errp);
if (!bs->file) {
ret = -EINVAL;
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
if (ret < 0) {
goto fail;
}

@ -351,10 +350,11 @@ static int qcow_reopen_prepare(BDRVReopenState *state,
* return 0 if not allocated, 1 if *result is assigned, and negative
* errno on failure.
*/
static int get_cluster_offset(BlockDriverState *bs,
uint64_t offset, int allocate,
int compressed_size,
int n_start, int n_end, uint64_t *result)
static int coroutine_fn get_cluster_offset(BlockDriverState *bs,
uint64_t offset, int allocate,
int compressed_size,
int n_start, int n_end,
uint64_t *result)
{
BDRVQcowState *s = bs->opaque;
int min_index, i, j, l1_index, l2_index, ret;
@ -381,9 +381,9 @@ static int get_cluster_offset(BlockDriverState *bs,
s->l1_table[l1_index] = l2_offset;
tmp = cpu_to_be64(l2_offset);
BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
ret = bdrv_pwrite_sync(bs->file,
s->l1_table_offset + l1_index * sizeof(tmp),
sizeof(tmp), &tmp, 0);
ret = bdrv_co_pwrite_sync(bs->file,
s->l1_table_offset + l1_index * sizeof(tmp),
sizeof(tmp), &tmp, 0);
if (ret < 0) {
return ret;
}
@ -414,14 +414,14 @@ static int get_cluster_offset(BlockDriverState *bs,
BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
if (new_l2_table) {
memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
ret = bdrv_pwrite_sync(bs->file, l2_offset,
s->l2_size * sizeof(uint64_t), l2_table, 0);
ret = bdrv_co_pwrite_sync(bs->file, l2_offset,
s->l2_size * sizeof(uint64_t), l2_table, 0);
if (ret < 0) {
return ret;
}
} else {
ret = bdrv_pread(bs->file, l2_offset, s->l2_size * sizeof(uint64_t),
l2_table, 0);
ret = bdrv_co_pread(bs->file, l2_offset,
s->l2_size * sizeof(uint64_t), l2_table, 0);
if (ret < 0) {
return ret;
}
@ -453,8 +453,8 @@ static int get_cluster_offset(BlockDriverState *bs,
cluster_offset = QEMU_ALIGN_UP(cluster_offset, s->cluster_size);
/* write the cluster content */
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
ret = bdrv_pwrite(bs->file, cluster_offset, s->cluster_size,
s->cluster_cache, 0);
ret = bdrv_co_pwrite(bs->file, cluster_offset, s->cluster_size,
s->cluster_cache, 0);
if (ret < 0) {
return ret;
}
@ -469,8 +469,9 @@ static int get_cluster_offset(BlockDriverState *bs,
if (cluster_offset + s->cluster_size > INT64_MAX) {
return -E2BIG;
}
ret = bdrv_truncate(bs->file, cluster_offset + s->cluster_size,
false, PREALLOC_MODE_OFF, 0, NULL);
ret = bdrv_co_truncate(bs->file,
cluster_offset + s->cluster_size,
false, PREALLOC_MODE_OFF, 0, NULL);
if (ret < 0) {
return ret;
}
@ -492,9 +493,9 @@ static int get_cluster_offset(BlockDriverState *bs,
return -EIO;
}
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
ret = bdrv_pwrite(bs->file, cluster_offset + i,
BDRV_SECTOR_SIZE,
s->cluster_data, 0);
ret = bdrv_co_pwrite(bs->file, cluster_offset + i,
BDRV_SECTOR_SIZE,
s->cluster_data, 0);
if (ret < 0) {
return ret;
}
@ -514,8 +515,8 @@ static int get_cluster_offset(BlockDriverState *bs,
} else {
BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
}
ret = bdrv_pwrite_sync(bs->file, l2_offset + l2_index * sizeof(tmp),
sizeof(tmp), &tmp, 0);
ret = bdrv_co_pwrite_sync(bs->file, l2_offset + l2_index * sizeof(tmp),
sizeof(tmp), &tmp, 0);
if (ret < 0) {
return ret;
}
@ -585,7 +586,8 @@ static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
return 0;
}

static int decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
static int coroutine_fn decompress_cluster(BlockDriverState *bs,
uint64_t cluster_offset)
{
BDRVQcowState *s = bs->opaque;
int ret, csize;
@ -596,7 +598,7 @@ static int decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
csize = cluster_offset >> (63 - s->cluster_bits);
csize &= (s->cluster_size - 1);
BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
ret = bdrv_pread(bs->file, coffset, csize, s->cluster_data, 0);
ret = bdrv_co_pread(bs->file, coffset, csize, s->cluster_data, 0);
if (ret < 0)
return -1;
if (decompress_buffer(s->cluster_cache, s->cluster_size,
@ -628,7 +630,6 @@ static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, int64_t offset,
uint8_t *buf;
void *orig_buf;

assert(!flags);
if (qiov->niov > 1) {
buf = orig_buf = qemu_try_blockalign(bs, qiov->size);
if (buf == NULL) {
@ -725,7 +726,6 @@ static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, int64_t offset,
uint8_t *buf;
void *orig_buf;

assert(!flags);
s->cluster_cache_offset = -1; /* disable compressed cache */

/* We must always copy the iov when encrypting, so we
@ -890,14 +890,14 @@ static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts,
}

/* write all the data */
ret = blk_pwrite(qcow_blk, 0, sizeof(header), &header, 0);
ret = blk_co_pwrite(qcow_blk, 0, sizeof(header), &header, 0);
if (ret < 0) {
goto exit;
}

if (qcow_opts->has_backing_file) {
ret = blk_pwrite(qcow_blk, sizeof(header), backing_filename_len,
qcow_opts->backing_file, 0);
ret = blk_co_pwrite(qcow_blk, sizeof(header), backing_filename_len,
qcow_opts->backing_file, 0);
if (ret < 0) {
goto exit;
}
@ -906,8 +906,8 @@ static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts,
tmp = g_malloc0(BDRV_SECTOR_SIZE);
for (i = 0; i < DIV_ROUND_UP(sizeof(uint64_t) * l1_size, BDRV_SECTOR_SIZE);
i++) {
ret = blk_pwrite(qcow_blk, header_size + BDRV_SECTOR_SIZE * i,
BDRV_SECTOR_SIZE, tmp, 0);
ret = blk_co_pwrite(qcow_blk, header_size + BDRV_SECTOR_SIZE * i,
BDRV_SECTOR_SIZE, tmp, 0);
if (ret < 0) {
g_free(tmp);
goto exit;
@ -955,8 +955,8 @@ static void set_readonly_helper(gpointer bitmap, gpointer value)
* If header_updated is not NULL then it is set appropriately regardless of
* the return value.
*/
bool qcow2_load_dirty_bitmaps(BlockDriverState *bs, bool *header_updated,
Error **errp)
bool coroutine_fn qcow2_load_dirty_bitmaps(BlockDriverState *bs,
bool *header_updated, Error **errp)
{
BDRVQcow2State *s = bs->opaque;
Qcow2BitmapList *bm_list;
@ -31,7 +31,8 @@
#include "qemu/memalign.h"
#include "trace.h"

int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
int coroutine_fn qcow2_shrink_l1_table(BlockDriverState *bs,
uint64_t exact_size)
{
BDRVQcow2State *s = bs->opaque;
int new_l1_size, i, ret;
@ -47,14 +48,14 @@ int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
#endif

BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
new_l1_size * L1E_SIZE,
(s->l1_size - new_l1_size) * L1E_SIZE, 0);
ret = bdrv_co_pwrite_zeroes(bs->file,
s->l1_table_offset + new_l1_size * L1E_SIZE,
(s->l1_size - new_l1_size) * L1E_SIZE, 0);
if (ret < 0) {
goto fail;
}

ret = bdrv_flush(bs->file->bs);
ret = bdrv_co_flush(bs->file->bs);
if (ret < 0) {
goto fail;
}
@ -823,10 +824,10 @@ static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
*
* Return 0 on success and -errno in error cases
*/
int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
uint64_t offset,
int compressed_size,
uint64_t *host_offset)
int coroutine_fn qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
uint64_t offset,
int compressed_size,
uint64_t *host_offset)
{
BDRVQcow2State *s = bs->opaque;
int l2_index, ret;
@ -1488,8 +1489,9 @@ static int coroutine_fn handle_dependencies(BlockDriverState *bs,
*
* -errno: in error cases
*/
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
static int coroutine_fn handle_copied(BlockDriverState *bs,
uint64_t guest_offset, uint64_t *host_offset, uint64_t *bytes,
QCowL2Meta **m)
{
BDRVQcow2State *s = bs->opaque;
int l2_index;
@ -1653,8 +1655,9 @@ static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
*
* -errno: in error cases
*/
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
static int coroutine_fn handle_alloc(BlockDriverState *bs,
uint64_t guest_offset, uint64_t *host_offset, uint64_t *bytes,
QCowL2Meta **m)
{
BDRVQcow2State *s = bs->opaque;
int l2_index;
@ -97,7 +97,7 @@ static void update_max_refcount_table_index(BDRVQcow2State *s)
s->max_refcount_table_index = i;
}

int qcow2_refcount_init(BlockDriverState *bs)
int coroutine_fn qcow2_refcount_init(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
unsigned int refcount_table_size2, i;
@ -118,8 +118,8 @@ int qcow2_refcount_init(BlockDriverState *bs)
goto fail;
}
BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
ret = bdrv_pread(bs->file, s->refcount_table_offset,
refcount_table_size2, s->refcount_table, 0);
ret = bdrv_co_pread(bs->file, s->refcount_table_offset,
refcount_table_size2, s->refcount_table, 0);
if (ret < 0) {
goto fail;
}
@ -3559,8 +3559,8 @@ static int64_t get_refblock_offset(BlockDriverState *bs, uint64_t offset)
return covering_refblock_offset;
}

static int qcow2_discard_refcount_block(BlockDriverState *bs,
uint64_t discard_block_offs)
static int coroutine_fn
qcow2_discard_refcount_block(BlockDriverState *bs, uint64_t discard_block_offs)
{
BDRVQcow2State *s = bs->opaque;
int64_t refblock_offs;
@ -3616,7 +3616,7 @@ static int qcow2_discard_refcount_block(BlockDriverState *bs,
return 0;
}

int qcow2_shrink_reftable(BlockDriverState *bs)
int coroutine_fn qcow2_shrink_reftable(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
uint64_t *reftable_tmp =
@ -3657,9 +3657,9 @@ int qcow2_shrink_reftable(BlockDriverState *bs)
reftable_tmp[i] = unused_block ? 0 : cpu_to_be64(s->refcount_table[i]);
}

ret = bdrv_pwrite_sync(bs->file, s->refcount_table_offset,
s->refcount_table_size * REFTABLE_ENTRY_SIZE,
reftable_tmp, 0);
ret = bdrv_co_pwrite_sync(bs->file, s->refcount_table_offset,
s->refcount_table_size * REFTABLE_ENTRY_SIZE,
reftable_tmp, 0);
/*
* If the write in the reftable failed the image may contain a partially
* overwritten reftable. In this case it would be better to clear the
@ -441,9 +441,9 @@ int coroutine_fn qcow2_check_read_snapshot_table(BlockDriverState *bs,
} QEMU_PACKED snapshot_table_pointer;

/* qcow2_do_open() discards this information in check mode */
ret = bdrv_pread(bs->file, offsetof(QCowHeader, nb_snapshots),
sizeof(snapshot_table_pointer), &snapshot_table_pointer,
0);
ret = bdrv_co_pread(bs->file, offsetof(QCowHeader, nb_snapshots),
sizeof(snapshot_table_pointer), &snapshot_table_pointer,
0);
if (ret < 0) {
result->check_errors++;
fprintf(stderr, "ERROR failed to read the snapshot table pointer from "
@ -1306,7 +1306,7 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
uint64_t l1_vm_state_index;
bool update_header = false;

ret = bdrv_pread(bs->file, 0, sizeof(header), &header, 0);
ret = bdrv_co_pread(bs->file, 0, sizeof(header), &header, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read qcow2 header");
goto fail;
@ -1382,9 +1382,9 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
if (header.header_length > sizeof(header)) {
s->unknown_header_fields_size = header.header_length - sizeof(header);
s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
ret = bdrv_pread(bs->file, sizeof(header),
s->unknown_header_fields_size,
s->unknown_header_fields, 0);
ret = bdrv_co_pread(bs->file, sizeof(header),
s->unknown_header_fields_size,
s->unknown_header_fields, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
"fields");
@ -1579,8 +1579,8 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
ret = -ENOMEM;
goto fail;
}
ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_size * L1E_SIZE,
s->l1_table, 0);
ret = bdrv_co_pread(bs->file, s->l1_table_offset, s->l1_size * L1E_SIZE,
s->l1_table, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read L1 table");
goto fail;
@ -1699,8 +1699,8 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
}

s->image_backing_file = g_malloc(len + 1);
ret = bdrv_pread(bs->file, header.backing_file_offset, len,
s->image_backing_file, 0);
ret = bdrv_co_pread(bs->file, header.backing_file_offset, len,
s->image_backing_file, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read backing file name");
goto fail;
@ -1905,11 +1905,11 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
.errp = errp,
.ret = -EINPROGRESS
};
int ret;

bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
BDRV_CHILD_IMAGE, false, errp);
if (!bs->file) {
return -EINVAL;
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
if (ret < 0) {
return ret;
}

/* Initialise locks */
@ -3679,7 +3679,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
cpu_to_be64(QCOW2_INCOMPAT_EXTL2);
}

ret = blk_pwrite(blk, 0, cluster_size, header, 0);
ret = blk_co_pwrite(blk, 0, cluster_size, header, 0);
g_free(header);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not write qcow2 header");
@ -3689,7 +3689,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
/* Write a refcount table with one refcount block */
refcount_table = g_malloc0(2 * cluster_size);
refcount_table[0] = cpu_to_be64(2 * cluster_size);
ret = blk_pwrite(blk, cluster_size, 2 * cluster_size, refcount_table, 0);
ret = blk_co_pwrite(blk, cluster_size, 2 * cluster_size, refcount_table, 0);
g_free(refcount_table);

if (ret < 0) {
@ -3744,8 +3744,8 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
}

/* Okay, now that we have a valid image, let's give it the right size */
ret = blk_truncate(blk, qcow2_opts->size, false, qcow2_opts->preallocation,
0, errp);
ret = blk_co_truncate(blk, qcow2_opts->size, false,
qcow2_opts->preallocation, 0, errp);
if (ret < 0) {
error_prepend(errp, "Could not resize image: ");
goto out;
@ -5287,8 +5287,8 @@ static int64_t qcow2_check_vmstate_request(BlockDriverState *bs,
return pos;
}

static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
int64_t pos)
static coroutine_fn int qcow2_save_vmstate(BlockDriverState *bs,
QEMUIOVector *qiov, int64_t pos)
{
int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos);
if (offset < 0) {
@ -5299,8 +5299,8 @@ static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
return bs->drv->bdrv_co_pwritev_part(bs, offset, qiov->size, qiov, 0, 0);
}

static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
int64_t pos)
static coroutine_fn int qcow2_load_vmstate(BlockDriverState *bs,
QEMUIOVector *qiov, int64_t pos)
{
int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos);
if (offset < 0) {
@ -846,7 +846,7 @@ int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
Error **errp);

/* qcow2-refcount.c functions */
int qcow2_refcount_init(BlockDriverState *bs);
int coroutine_fn qcow2_refcount_init(BlockDriverState *bs);
void qcow2_refcount_close(BlockDriverState *bs);

int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index,
@ -893,14 +893,14 @@ int qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
BlockDriverAmendStatusCB *status_cb,
void *cb_opaque, Error **errp);
int qcow2_shrink_reftable(BlockDriverState *bs);
int coroutine_fn qcow2_shrink_reftable(BlockDriverState *bs);
int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size);
int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs);

/* qcow2-cluster.c functions */
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
bool exact_size);
int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t max_size);
int coroutine_fn qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t max_size);
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index);
int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
uint8_t *buf, int nb_sectors, bool enc, Error **errp);
@ -911,10 +911,10 @@ int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
int coroutine_fn qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes,
uint64_t *host_offset, QCowL2Meta **m);
int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
uint64_t offset,
int compressed_size,
uint64_t *host_offset);
int coroutine_fn qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
uint64_t offset,
int compressed_size,
uint64_t *host_offset);
void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
uint64_t *coffset, int *csize);

@ -982,8 +982,8 @@ void qcow2_cache_discard(Qcow2Cache *c, void *table);
int qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size);
bool qcow2_load_dirty_bitmaps(BlockDriverState *bs, bool *header_updated,
Error **errp);
bool coroutine_fn qcow2_load_dirty_bitmaps(BlockDriverState *bs,
bool *header_updated, Error **errp);
bool qcow2_get_bitmap_info_list(BlockDriverState *bs,
Qcow2BitmapInfoList **info_list, Error **errp);
int qcow2_reopen_bitmaps_rw(BlockDriverState *bs, Error **errp);
@ -991,13 +991,13 @@ int qcow2_truncate_bitmaps_check(BlockDriverState *bs, Error **errp);
bool qcow2_store_persistent_dirty_bitmaps(BlockDriverState *bs,
bool release_stored, Error **errp);
int qcow2_reopen_bitmaps_ro(BlockDriverState *bs, Error **errp);
bool qcow2_co_can_store_new_dirty_bitmap(BlockDriverState *bs,
const char *name,
uint32_t granularity,
Error **errp);
int qcow2_co_remove_persistent_dirty_bitmap(BlockDriverState *bs,
const char *name,
Error **errp);
bool coroutine_fn qcow2_co_can_store_new_dirty_bitmap(BlockDriverState *bs,
const char *name,
uint32_t granularity,
Error **errp);
int coroutine_fn qcow2_co_remove_persistent_dirty_bitmap(BlockDriverState *bs,
const char *name,
Error **errp);
bool qcow2_supports_persistent_dirty_bitmap(BlockDriverState *bs);
uint64_t qcow2_get_persistent_dirty_bitmap_size(BlockDriverState *bs,
uint32_t cluster_size);
@ -100,7 +100,7 @@ static int coroutine_fn qed_write_table(BDRVQEDState *s, uint64_t offset,
}

if (flush) {
ret = bdrv_flush(s->bs);
ret = bdrv_co_flush(s->bs);
if (ret < 0) {
goto out;
}
21
block/qed.c
@ -387,7 +387,7 @@ static int coroutine_fn bdrv_qed_do_open(BlockDriverState *bs, QDict *options,
int64_t file_size;
int ret;

ret = bdrv_pread(bs->file, 0, sizeof(le_header), &le_header, 0);
ret = bdrv_co_pread(bs->file, 0, sizeof(le_header), &le_header, 0);
if (ret < 0) {
error_setg(errp, "Failed to read QED header");
return ret;
@ -492,7 +492,7 @@ static int coroutine_fn bdrv_qed_do_open(BlockDriverState *bs, QDict *options,
}

/* From here on only known autoclear feature bits are valid */
bdrv_flush(bs->file->bs);
bdrv_co_flush(bs->file->bs);
}

s->l1_table = qed_alloc_table(s);
@ -561,11 +561,11 @@ static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
.errp = errp,
.ret = -EINPROGRESS
};
int ret;

bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
BDRV_CHILD_IMAGE, false, errp);
if (!bs->file) {
return -EINVAL;
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
if (ret < 0) {
return ret;
}

bdrv_qed_init_state(bs);
@ -693,7 +693,7 @@ static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts,
* The QED format associates file length with allocation status,
* so a new file (which is empty) must have a length of 0.
*/
ret = blk_truncate(blk, 0, true, PREALLOC_MODE_OFF, 0, errp);
ret = blk_co_truncate(blk, 0, true, PREALLOC_MODE_OFF, 0, errp);
if (ret < 0) {
goto out;
}
@ -712,18 +712,18 @@ static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts,
}

qed_header_cpu_to_le(&header, &le_header);
ret = blk_pwrite(blk, 0, sizeof(le_header), &le_header, 0);
ret = blk_co_pwrite(blk, 0, sizeof(le_header), &le_header, 0);
if (ret < 0) {
goto out;
}
ret = blk_pwrite(blk, sizeof(le_header), header.backing_filename_size,
ret = blk_co_pwrite(blk, sizeof(le_header), header.backing_filename_size,
qed_opts->backing_file, 0);
if (ret < 0) {
goto out;
}

l1_table = g_malloc0(l1_size);
ret = blk_pwrite(blk, header.l1_table_offset, l1_size, l1_table, 0);
ret = blk_co_pwrite(blk, header.l1_table_offset, l1_size, l1_table, 0);
if (ret < 0) {
goto out;
}
@ -1395,7 +1395,6 @@ static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs,
int64_t sector_num, int nb_sectors,
QEMUIOVector *qiov, int flags)
{
assert(!flags);
return qed_co_request(bs, sector_num, qiov, nb_sectors, QED_AIOCB_WRITE);
}
@ -258,6 +258,8 @@ static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset,
qemu_iovec_add(&local_qiov, buf, 512);
qemu_iovec_concat(&local_qiov, qiov, 512, qiov->size - 512);
qiov = &local_qiov;

flags &= ~BDRV_REQ_REGISTERED_BUF;
}

ret = raw_adjust_offset(bs, &offset, bytes, true);
@ -458,8 +460,8 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags,
file_role = BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY;
}

bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
file_role, false, errp);
bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
file_role, false, errp);
if (!bs->file) {
return -EINVAL;
}
@ -88,11 +88,9 @@ static int replication_open(BlockDriverState *bs, QDict *options,
const char *mode;
const char *top_id;

bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
false, errp);
if (!bs->file) {
return -EINVAL;
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
if (ret < 0) {
return ret;
}

ret = -EINVAL;
@ -261,7 +259,6 @@ static coroutine_fn int replication_co_writev(BlockDriverState *bs,
int ret;
int64_t n;

assert(!flags);
ret = replication_get_io_status(s);
if (ret < 0) {
goto out;
@ -82,9 +82,9 @@ static void snapshot_access_refresh_filename(BlockDriverState *bs)
static int snapshot_access_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
false, errp);
bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
false, errp);
if (!bs->file) {
return -EINVAL;
}
@ -151,41 +151,29 @@ bool bdrv_snapshot_find_by_id_and_name(BlockDriverState *bs,
}

/**
* Return a pointer to the child BDS pointer to which we can fall
* Return a pointer to child of given BDS to which we can fall
* back if the given BDS does not support snapshots.
* Return NULL if there is no BDS to (safely) fall back to.
*
* We need to return an indirect pointer because bdrv_snapshot_goto()
* has to modify the BdrvChild pointer.
*/
static BdrvChild **bdrv_snapshot_fallback_ptr(BlockDriverState *bs)
static BdrvChild *bdrv_snapshot_fallback_child(BlockDriverState *bs)
{
BdrvChild **fallback;
BdrvChild *fallback = bdrv_primary_child(bs);
BdrvChild *child;

/*
* The only BdrvChild pointers that are safe to modify (and which
* we can thus return a reference to) are bs->file and
* bs->backing.
*/
fallback = &bs->file;
if (!*fallback && bs->drv && bs->drv->is_filter) {
fallback = &bs->backing;
}

if (!*fallback) {
/* We allow fallback only to primary child */
if (!fallback) {
return NULL;
}

/*
* Check that there are no other children that would need to be
* snapshotted. If there are, it is not safe to fall back to
* *fallback.
* fallback.
*/
QLIST_FOREACH(child, &bs->children, next) {
if (child->role & (BDRV_CHILD_DATA | BDRV_CHILD_METADATA |
BDRV_CHILD_FILTERED) &&
child != *fallback)
child != fallback)
{
return NULL;
}
@ -196,8 +184,7 @@ static BdrvChild **bdrv_snapshot_fallback_ptr(BlockDriverState *bs)

static BlockDriverState *bdrv_snapshot_fallback(BlockDriverState *bs)
{
BdrvChild **child_ptr = bdrv_snapshot_fallback_ptr(bs);
return child_ptr ? (*child_ptr)->bs : NULL;
return child_bs(bdrv_snapshot_fallback_child(bs));
}

int bdrv_can_snapshot(BlockDriverState *bs)
@ -244,7 +231,7 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
Error **errp)
{
BlockDriver *drv = bs->drv;
BdrvChild **fallback_ptr;
BdrvChild *fallback;
int ret, open_ret;

GLOBAL_STATE_CODE();
@ -267,13 +254,13 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
return ret;
}

fallback_ptr = bdrv_snapshot_fallback_ptr(bs);
if (fallback_ptr) {
fallback = bdrv_snapshot_fallback_child(bs);
if (fallback) {
QDict *options;
QDict *file_options;
Error *local_err = NULL;
BlockDriverState *fallback_bs = (*fallback_ptr)->bs;
char *subqdict_prefix = g_strdup_printf("%s.", (*fallback_ptr)->name);
BlockDriverState *fallback_bs = fallback->bs;
char *subqdict_prefix = g_strdup_printf("%s.", fallback->name);

options = qdict_clone_shallow(bs->options);

@ -284,8 +271,8 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
qobject_unref(file_options);
g_free(subqdict_prefix);

/* Force .bdrv_open() below to re-attach fallback_bs on *fallback_ptr */
qdict_put_str(options, (*fallback_ptr)->name,
/* Force .bdrv_open() below to re-attach fallback_bs on fallback */
qdict_put_str(options, fallback->name,
bdrv_get_node_name(fallback_bs));

/* Now close bs, apply the snapshot on fallback_bs, and re-open bs */
@ -294,8 +281,7 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
}

/* .bdrv_open() will re-attach it */
bdrv_unref_child(bs, *fallback_ptr);
*fallback_ptr = NULL;
bdrv_unref_child(bs, fallback);

ret = bdrv_snapshot_goto(fallback_bs, snapshot_id, errp);
open_ret = drv->bdrv_open(bs, options, bs->open_flags, &local_err);
@ -309,15 +295,12 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
}

/*
* fallback_ptr is &bs->file or &bs->backing. *fallback_ptr
* was closed above and set to NULL, but the .bdrv_open() call
* has opened it again, because we set the respective option
* (with the qdict_put_str() call above).
* Assert that .bdrv_open() has attached some child on
* *fallback_ptr, and that it has attached the one we wanted
* it to (i.e., fallback_bs).
* fallback was a primary child. It was closed above and set to NULL,
* but the .bdrv_open() call has opened it again, because we set the
* respective option (with the qdict_put_str() call above).
* Assert that .bdrv_open() has attached the right BDS as primary child.
*/
assert(*fallback_ptr && fallback_bs == (*fallback_ptr)->bs);
assert(bdrv_primary_bs(bs) == fallback_bs);
bdrv_unref(fallback_bs);
return ret;
}
@ -1129,9 +1129,9 @@ static coroutine_fn int ssh_co_readv(BlockDriverState *bs,
return ret;
}

static int ssh_write(BDRVSSHState *s, BlockDriverState *bs,
int64_t offset, size_t size,
QEMUIOVector *qiov)
static coroutine_fn int ssh_write(BDRVSSHState *s, BlockDriverState *bs,
int64_t offset, size_t size,
QEMUIOVector *qiov)
{
ssize_t r;
size_t written;
@ -1196,7 +1196,6 @@ static coroutine_fn int ssh_co_writev(BlockDriverState *bs,
BDRVSSHState *s = bs->opaque;
int ret;

assert(!flags);
qemu_co_mutex_lock(&s->lock);
ret = ssh_write(s, bs, sector_num * BDRV_SECTOR_SIZE,
nb_sectors * BDRV_SECTOR_SIZE, qiov);
@ -78,11 +78,9 @@ static int throttle_open(BlockDriverState *bs, QDict *options,
char *group;
int ret;

bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
false, errp);
if (!bs->file) {
return -EINVAL;
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
if (ret < 0) {
return ret;
}
bs->supported_write_flags = bs->file->bs->supported_write_flags |
BDRV_REQ_WRITE_UNCHANGED;
24
block/vdi.c
@ -377,10 +377,9 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
int ret;
QemuUUID uuid_link, uuid_parent;

bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
BDRV_CHILD_IMAGE, false, errp);
if (!bs->file) {
return -EINVAL;
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
if (ret < 0) {
return ret;
}

logout("\n");
@ -664,7 +663,8 @@ vdi_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
* so this full-cluster write does not overlap a partial write
* of the same cluster, issued from the "else" branch.
*/
ret = bdrv_pwrite(bs->file, data_offset, s->block_size, block, 0);
ret = bdrv_co_pwrite(bs->file, data_offset, s->block_size, block,
0);
qemu_co_rwlock_unlock(&s->bmap_lock);
} else {
nonallocating_write:
@ -709,7 +709,7 @@ nonallocating_write:
assert(VDI_IS_ALLOCATED(bmap_first));
*header = s->header;
vdi_header_to_le(header);
ret = bdrv_pwrite(bs->file, 0, sizeof(*header), header, 0);
ret = bdrv_co_pwrite(bs->file, 0, sizeof(*header), header, 0);
g_free(header);

if (ret < 0) {
@ -726,8 +726,8 @@ nonallocating_write:
base = ((uint8_t *)&s->bmap[0]) + bmap_first * SECTOR_SIZE;
logout("will write %u block map sectors starting from entry %u\n",
n_sectors, bmap_first);
ret = bdrv_pwrite(bs->file, offset * SECTOR_SIZE,
n_sectors * SECTOR_SIZE, base, 0);
ret = bdrv_co_pwrite(bs->file, offset * SECTOR_SIZE,
n_sectors * SECTOR_SIZE, base, 0);
}

return ret;
@ -845,7 +845,7 @@ static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options,
vdi_header_print(&header);
}
vdi_header_to_le(&header);
ret = blk_pwrite(blk, offset, sizeof(header), &header, 0);
ret = blk_co_pwrite(blk, offset, sizeof(header), &header, 0);
if (ret < 0) {
error_setg(errp, "Error writing header");
goto exit;
@ -866,7 +866,7 @@ static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options,
bmap[i] = VDI_UNALLOCATED;
}
}
ret = blk_pwrite(blk, offset, bmap_size, bmap, 0);
ret = blk_co_pwrite(blk, offset, bmap_size, bmap, 0);
if (ret < 0) {
error_setg(errp, "Error writing bmap");
goto exit;
@ -875,8 +875,8 @@ static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options,
}

if (image_type == VDI_TYPE_STATIC) {
ret = blk_truncate(blk, offset + blocks * block_size, false,
PREALLOC_MODE_OFF, 0, errp);
ret = blk_co_truncate(blk, offset + blocks * block_size, false,
PREALLOC_MODE_OFF, 0, errp);
if (ret < 0) {
error_prepend(errp, "Failed to statically allocate file");
goto exit;
16
block/vhdx.c
@ -1001,10 +1001,9 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags,
uint64_t signature;
Error *local_err = NULL;

bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
BDRV_CHILD_IMAGE, false, errp);
if (!bs->file) {
return -EINVAL;
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
if (ret < 0) {
return ret;
}

s->bat = NULL;
@ -1342,7 +1341,6 @@ static coroutine_fn int vhdx_co_writev(BlockDriverState *bs, int64_t sector_num,
uint64_t bat_prior_offset = 0;
bool bat_update = false;

assert(!flags);
qemu_iovec_init(&hd_qiov, qiov->niov);

qemu_co_mutex_lock(&s->lock);
@ -2012,15 +2010,15 @@ static int coroutine_fn vhdx_co_create(BlockdevCreateOptions *opts,
creator = g_utf8_to_utf16("QEMU v" QEMU_VERSION, -1, NULL,
&creator_items, NULL);
signature = cpu_to_le64(VHDX_FILE_SIGNATURE);
ret = blk_pwrite(blk, VHDX_FILE_ID_OFFSET, sizeof(signature), &signature,
0);
ret = blk_co_pwrite(blk, VHDX_FILE_ID_OFFSET, sizeof(signature), &signature,
0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Failed to write file signature");
goto delete_and_exit;
}
if (creator) {
ret = blk_pwrite(blk, VHDX_FILE_ID_OFFSET + sizeof(signature),
creator_items * sizeof(gunichar2), creator, 0);
ret = blk_co_pwrite(blk, VHDX_FILE_ID_OFFSET + sizeof(signature),
creator_items * sizeof(gunichar2), creator, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Failed to write creator field");
goto delete_and_exit;
95
block/vmdk.c
@ -1308,10 +1308,9 @@ static int vmdk_open(BlockDriverState *bs, QDict *options, int flags,
BDRVVmdkState *s = bs->opaque;
uint32_t magic;

bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
BDRV_CHILD_IMAGE, false, errp);
if (!bs->file) {
return -EINVAL;
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
if (ret < 0) {
return ret;
}

buf = vmdk_read_desc(bs->file, 0, errp);
@ -1404,13 +1403,13 @@ static void vmdk_refresh_limits(BlockDriverState *bs, Error **errp)
* [@skip_start_sector, @skip_end_sector) is not copied or written, and leave
* it for call to write user data in the request.
*/
static int get_whole_cluster(BlockDriverState *bs,
VmdkExtent *extent,
uint64_t cluster_offset,
uint64_t offset,
uint64_t skip_start_bytes,
uint64_t skip_end_bytes,
bool zeroed)
static int coroutine_fn get_whole_cluster(BlockDriverState *bs,
VmdkExtent *extent,
uint64_t cluster_offset,
uint64_t offset,
uint64_t skip_start_bytes,
uint64_t skip_end_bytes,
bool zeroed)
{
int ret = VMDK_OK;
int64_t cluster_bytes;
@ -1441,16 +1440,16 @@ static int get_whole_cluster(BlockDriverState *bs,
if (copy_from_backing) {
/* qcow2 emits this on bs->file instead of bs->backing */
BLKDBG_EVENT(extent->file, BLKDBG_COW_READ);
ret = bdrv_pread(bs->backing, offset, skip_start_bytes,
whole_grain, 0);
ret = bdrv_co_pread(bs->backing, offset, skip_start_bytes,
whole_grain, 0);
if (ret < 0) {
ret = VMDK_ERROR;
goto exit;
}
}
BLKDBG_EVENT(extent->file, BLKDBG_COW_WRITE);
ret = bdrv_pwrite(extent->file, cluster_offset, skip_start_bytes,
whole_grain, 0);
ret = bdrv_co_pwrite(extent->file, cluster_offset, skip_start_bytes,
whole_grain, 0);
if (ret < 0) {
ret = VMDK_ERROR;
goto exit;
@ -1461,18 +1460,18 @@ static int get_whole_cluster(BlockDriverState *bs,
if (copy_from_backing) {
/* qcow2 emits this on bs->file instead of bs->backing */
BLKDBG_EVENT(extent->file, BLKDBG_COW_READ);
ret = bdrv_pread(bs->backing, offset + skip_end_bytes,
cluster_bytes - skip_end_bytes,
whole_grain + skip_end_bytes, 0);
ret = bdrv_co_pread(bs->backing, offset + skip_end_bytes,
cluster_bytes - skip_end_bytes,
whole_grain + skip_end_bytes, 0);
if (ret < 0) {
ret = VMDK_ERROR;
goto exit;
}
}
BLKDBG_EVENT(extent->file, BLKDBG_COW_WRITE);
ret = bdrv_pwrite(extent->file, cluster_offset + skip_end_bytes,
cluster_bytes - skip_end_bytes,
whole_grain + skip_end_bytes, 0);
ret = bdrv_co_pwrite(extent->file, cluster_offset + skip_end_bytes,
cluster_bytes - skip_end_bytes,
whole_grain + skip_end_bytes, 0);
if (ret < 0) {
ret = VMDK_ERROR;
goto exit;
@ -1485,29 +1484,29 @@ exit:
return ret;
}

static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
uint32_t offset)
static int coroutine_fn vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
uint32_t offset)
{
offset = cpu_to_le32(offset);
/* update L2 table */
BLKDBG_EVENT(extent->file, BLKDBG_L2_UPDATE);
if (bdrv_pwrite(extent->file,
((int64_t)m_data->l2_offset * 512)
+ (m_data->l2_index * sizeof(offset)),
sizeof(offset), &offset, 0) < 0) {
if (bdrv_co_pwrite(extent->file,
((int64_t)m_data->l2_offset * 512)
+ (m_data->l2_index * sizeof(offset)),
sizeof(offset), &offset, 0) < 0) {
return VMDK_ERROR;
}
/* update backup L2 table */
if (extent->l1_backup_table_offset != 0) {
m_data->l2_offset = extent->l1_backup_table[m_data->l1_index];
if (bdrv_pwrite(extent->file,
((int64_t)m_data->l2_offset * 512)
+ (m_data->l2_index * sizeof(offset)),
sizeof(offset), &offset, 0) < 0) {
if (bdrv_co_pwrite(extent->file,
((int64_t)m_data->l2_offset * 512)
+ (m_data->l2_index * sizeof(offset)),
sizeof(offset), &offset, 0) < 0) {
return VMDK_ERROR;
}
}
if (bdrv_flush(extent->file->bs) < 0) {
if (bdrv_co_flush(extent->file->bs) < 0) {
return VMDK_ERROR;
}
if (m_data->l2_cache_entry) {
@ -1537,14 +1536,14 @@ static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
* VMDK_UNALLOC if cluster is not mapped and @allocate is false.
* VMDK_ERROR if failed.
*/
static int get_cluster_offset(BlockDriverState *bs,
VmdkExtent *extent,
VmdkMetaData *m_data,
uint64_t offset,
bool allocate,
uint64_t *cluster_offset,
uint64_t skip_start_bytes,
uint64_t skip_end_bytes)
static int coroutine_fn get_cluster_offset(BlockDriverState *bs,
VmdkExtent *extent,
VmdkMetaData *m_data,
uint64_t offset,
bool allocate,
uint64_t *cluster_offset,
uint64_t skip_start_bytes,
uint64_t skip_end_bytes)
{
unsigned int l1_index, l2_offset, l2_index;
int min_index, i, j;
@ -1624,11 +1623,10 @@ static int get_cluster_offset(BlockDriverState *bs,
}
l2_table = (char *)extent->l2_cache + (min_index * l2_size_bytes);
BLKDBG_EVENT(extent->file, BLKDBG_L2_LOAD);
if (bdrv_pread(extent->file,
if (bdrv_co_pread(extent->file,
(int64_t)l2_offset * 512,
l2_size_bytes,
l2_table,
0
l2_table, 0
) < 0) {
return VMDK_ERROR;
}
@ -1899,7 +1897,8 @@ vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
cluster_buf = g_malloc(buf_bytes);
uncomp_buf = g_malloc(cluster_bytes);
BLKDBG_EVENT(extent->file, BLKDBG_READ_COMPRESSED);
ret = bdrv_pread(extent->file, cluster_offset, buf_bytes, cluster_buf, 0);
ret = bdrv_co_pread(extent->file, cluster_offset, buf_bytes, cluster_buf,
0);
if (ret < 0) {
goto out;
}
@ -2144,8 +2143,8 @@ vmdk_co_pwritev_compressed(BlockDriverState *bs, int64_t offset, int64_t bytes,
return length;
}
length = QEMU_ALIGN_UP(length, BDRV_SECTOR_SIZE);
ret = bdrv_truncate(s->extents[i].file, length, false,
PREALLOC_MODE_OFF, 0, NULL);
ret = bdrv_co_truncate(s->extents[i].file, length, false,
PREALLOC_MODE_OFF, 0, NULL);
if (ret < 0) {
return ret;
}
@ -2586,7 +2585,7 @@ static int coroutine_fn vmdk_co_do_create(int64_t size,
desc_offset = 0x200;
}

ret = blk_pwrite(blk, desc_offset, desc_len, desc, 0);
ret = blk_co_pwrite(blk, desc_offset, desc_len, desc, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not write description");
goto exit;
@ -2594,7 +2593,7 @@ static int coroutine_fn vmdk_co_do_create(int64_t size,
/* bdrv_pwrite write padding zeros to align to sector, we don't need that
* for description file */
if (desc_offset == 0) {
ret = blk_truncate(blk, desc_len, false, PREALLOC_MODE_OFF, 0, errp);
ret = blk_co_truncate(blk, desc_len, false, PREALLOC_MODE_OFF, 0, errp);
if (ret < 0) {
goto exit;
}
@ -233,10 +233,9 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
int ret;
int64_t bs_size;

bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
BDRV_CHILD_IMAGE, false, errp);
if (!bs->file) {
return -EINVAL;
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
if (ret < 0) {
return ret;
}

opts = qemu_opts_create(&vpc_runtime_opts, NULL, 0, &error_abort);
@ -25,6 +25,7 @@

#include "qemu/osdep.h"
#include <dirent.h>
#include <glib/gstdio.h>
#include "qapi/error.h"
#include "block/block_int.h"
#include "block/qdict.h"
@ -499,7 +500,7 @@ static bool valid_filename(const unsigned char *name)
(c >= 'A' && c <= 'Z') ||
(c >= 'a' && c <= 'z') ||
c > 127 ||
strchr("$%'-_@~`!(){}^#&.+,;=[]", c) != NULL))
strchr(" $%'-_@~`!(){}^#&.+,;=[]", c) != NULL))
{
return false;
}
@ -2726,13 +2727,9 @@ static int handle_renames_and_mkdirs(BDRVVVFATState* s)
mapping_t* mapping;
int j, parent_path_len;

#ifdef __MINGW32__
if (mkdir(commit->path))
if (g_mkdir(commit->path, 0755)) {
return -5;
#else
if (mkdir(commit->path, 0755))
return -5;
#endif
}

mapping = insert_mapping(s, commit->param.mkdir.cluster,
commit->param.mkdir.cluster + 1);
@ -2993,11 +2990,35 @@ DLOG(checkpoint());

vvfat_close_current_file(s);

if (sector_num == s->offset_to_bootsector && nb_sectors == 1) {
/*
* Write on bootsector. Allow only changing the reserved1 field,
* used to mark volume dirtiness
*/
unsigned char *bootsector = s->first_sectors
+ s->offset_to_bootsector * 0x200;
/*
* LATER TODO: if FAT32, this is wrong (see init_directories(),
* which always creates a FAT16 bootsector)
*/
const int reserved1_offset = offsetof(bootsector_t, u.fat16.reserved1);

for (i = 0; i < 0x200; i++) {
if (i != reserved1_offset && bootsector[i] != buf[i]) {
fprintf(stderr, "Tried to write to protected bootsector\n");
return -1;
}
}

/* Update bootsector with the only updatable byte, and return success */
bootsector[reserved1_offset] = buf[reserved1_offset];
return 0;
}

/*
* Some sanity checks:
* - do not allow writing to the boot sector
*/

if (sector_num < s->offset_to_fat)
return -1;

@ -3146,10 +3167,9 @@ static int enable_write_target(BlockDriverState *bs, Error **errp)

array_init(&(s->commits), sizeof(commit_t));

s->qcow_filename = g_malloc(PATH_MAX);
ret = get_tmp_filename(s->qcow_filename, PATH_MAX);
if (ret < 0) {
error_setg_errno(errp, -ret, "can't create temporary file");
s->qcow_filename = create_tmp_file(errp);
if (!s->qcow_filename) {
ret = -ENOENT;
goto err;
}
24
blockdev.c
24
blockdev.c
@ -1630,8 +1630,8 @@ static void external_snapshot_abort(BlkActionState *common)
|
||||
aio_context_release(aio_context);
|
||||
aio_context_acquire(tmp_context);
|
||||
|
||||
ret = bdrv_try_set_aio_context(state->old_bs,
|
||||
aio_context, NULL);
|
||||
ret = bdrv_try_change_aio_context(state->old_bs,
|
||||
aio_context, NULL, NULL);
|
||||
assert(ret == 0);
|
||||
|
||||
aio_context_release(tmp_context);
|
||||
@ -1792,12 +1792,12 @@ static void drive_backup_prepare(BlkActionState *common, Error **errp)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Honor bdrv_try_set_aio_context() context acquisition requirements. */
|
||||
/* Honor bdrv_try_change_aio_context() context acquisition requirements. */
|
||||
old_context = bdrv_get_aio_context(target_bs);
|
||||
aio_context_release(aio_context);
|
||||
aio_context_acquire(old_context);
|
||||
|
||||
ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
|
||||
ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
|
||||
if (ret < 0) {
|
||||
bdrv_unref(target_bs);
|
||||
aio_context_release(old_context);
|
||||
@ -1892,12 +1892,12 @@ static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
|
||||
return;
|
||||
}
|
||||
|
||||
/* Honor bdrv_try_set_aio_context() context acquisition requirements. */
|
||||
/* Honor bdrv_try_change_aio_context() context acquisition requirements. */
|
||||
aio_context = bdrv_get_aio_context(bs);
|
||||
old_context = bdrv_get_aio_context(target_bs);
|
||||
aio_context_acquire(old_context);
|
||||
|
||||
ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
|
||||
ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
|
||||
if (ret < 0) {
|
||||
aio_context_release(old_context);
|
||||
return;
|
||||
@ -2448,7 +2448,7 @@ void coroutine_fn qmp_block_resize(bool has_device, const char *device,
|
||||
bdrv_co_unlock(bs);
|
||||
|
||||
old_ctx = bdrv_co_enter(bs);
|
||||
blk_truncate(blk, size, false, PREALLOC_MODE_OFF, 0, errp);
|
||||
blk_co_truncate(blk, size, false, PREALLOC_MODE_OFF, 0, errp);
|
||||
bdrv_co_leave(bs, old_ctx);
|
||||
|
||||
bdrv_co_lock(bs);
|
||||
@ -3194,12 +3194,12 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
|
||||
!bdrv_has_zero_init(target_bs)));
|
||||
|
||||
|
||||
/* Honor bdrv_try_set_aio_context() context acquisition requirements. */
|
||||
/* Honor bdrv_try_change_aio_context() context acquisition requirements. */
|
||||
old_context = bdrv_get_aio_context(target_bs);
|
||||
aio_context_release(aio_context);
|
||||
aio_context_acquire(old_context);
|
||||
|
||||
ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
|
||||
ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
|
||||
if (ret < 0) {
|
||||
bdrv_unref(target_bs);
|
||||
aio_context_release(old_context);
|
||||
@ -3266,12 +3266,12 @@ void qmp_blockdev_mirror(bool has_job_id, const char *job_id,
|
||||
|
||||
zero_target = (sync == MIRROR_SYNC_MODE_FULL);
|
||||
|
||||
/* Honor bdrv_try_set_aio_context() context acquisition requirements. */
|
||||
/* Honor bdrv_try_change_aio_context() context acquisition requirements. */
|
||||
old_context = bdrv_get_aio_context(target_bs);
|
||||
aio_context = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(old_context);
|
||||
|
||||
ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
|
||||
ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
|
||||
|
||||
aio_context_release(old_context);
|
||||
aio_context_acquire(aio_context);
|
||||
@ -3767,7 +3767,7 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread,
    old_context = bdrv_get_aio_context(bs);
    aio_context_acquire(old_context);

    bdrv_try_set_aio_context(bs, new_context, errp);
    bdrv_try_change_aio_context(bs, new_context, NULL, errp);

    aio_context_release(old_context);
}
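Throughout blockdev.c the old bdrv_try_set_aio_context() calls become bdrv_try_change_aio_context(), which takes an extra BdrvChild to ignore (NULL here). The "context acquisition requirements" mentioned in the comments boil down to holding the node's current AioContext, not the destination one, while the move happens. A condensed sketch of that calling convention (names invented, details simplified relative to the hunks above):

static int move_node_to_context(BlockDriverState *target_bs,
                                AioContext *new_context, Error **errp)
{
    AioContext *old_context = bdrv_get_aio_context(target_bs);
    int ret;

    /* Hold the node's current context across the move */
    aio_context_acquire(old_context);
    ret = bdrv_try_change_aio_context(target_bs, new_context, NULL, errp);
    aio_context_release(old_context);

    return ret;
}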
56
blockjob.c
@ -126,39 +126,50 @@ static void child_job_drained_end(BdrvChild *c, int *drained_end_counter)
|
||||
job_resume(&job->job);
|
||||
}
|
||||
|
||||
static bool child_job_can_set_aio_ctx(BdrvChild *c, AioContext *ctx,
|
||||
GSList **ignore, Error **errp)
|
||||
typedef struct BdrvStateChildJobContext {
|
||||
AioContext *new_ctx;
|
||||
BlockJob *job;
|
||||
} BdrvStateChildJobContext;
|
||||
|
||||
static void child_job_set_aio_ctx_commit(void *opaque)
|
||||
{
|
||||
BdrvStateChildJobContext *s = opaque;
|
||||
BlockJob *job = s->job;
|
||||
|
||||
job_set_aio_context(&job->job, s->new_ctx);
|
||||
}
|
||||
|
||||
static TransactionActionDrv change_child_job_context = {
|
||||
.commit = child_job_set_aio_ctx_commit,
|
||||
.clean = g_free,
|
||||
};
|
||||
|
||||
static bool child_job_change_aio_ctx(BdrvChild *c, AioContext *ctx,
|
||||
GHashTable *visited, Transaction *tran,
|
||||
Error **errp)
|
||||
{
|
||||
BlockJob *job = c->opaque;
|
||||
BdrvStateChildJobContext *s;
|
||||
GSList *l;
|
||||
|
||||
for (l = job->nodes; l; l = l->next) {
|
||||
BdrvChild *sibling = l->data;
|
||||
if (!bdrv_child_can_set_aio_context(sibling, ctx, ignore, errp)) {
|
||||
if (!bdrv_child_change_aio_context(sibling, ctx, visited,
|
||||
tran, errp)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
s = g_new(BdrvStateChildJobContext, 1);
|
||||
*s = (BdrvStateChildJobContext) {
|
||||
.new_ctx = ctx,
|
||||
.job = job,
|
||||
};
|
||||
|
||||
tran_add(tran, &change_child_job_context, s);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void child_job_set_aio_ctx(BdrvChild *c, AioContext *ctx,
|
||||
GSList **ignore)
|
||||
{
|
||||
BlockJob *job = c->opaque;
|
||||
GSList *l;
|
||||
|
||||
for (l = job->nodes; l; l = l->next) {
|
||||
BdrvChild *sibling = l->data;
|
||||
if (g_slist_find(*ignore, sibling)) {
|
||||
continue;
|
||||
}
|
||||
*ignore = g_slist_prepend(*ignore, sibling);
|
||||
bdrv_set_aio_context_ignore(sibling->bs, ctx, ignore);
|
||||
}
|
||||
|
||||
job_set_aio_context(&job->job, ctx);
|
||||
}
|
||||
|
||||
static AioContext *child_job_get_parent_aio_context(BdrvChild *c)
|
||||
{
|
||||
BlockJob *job = c->opaque;
|
||||
@ -172,8 +183,7 @@ static const BdrvChildClass child_job = {
    .drained_begin = child_job_drained_begin,
    .drained_poll = child_job_drained_poll,
    .drained_end = child_job_drained_end,
    .can_set_aio_ctx = child_job_can_set_aio_ctx,
    .set_aio_ctx = child_job_set_aio_ctx,
    .change_aio_ctx = child_job_change_aio_ctx,
    .stay_at_node = true,
    .get_parent_aio_context = child_job_get_parent_aio_context,
};
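The blockjob.c rewrite above moves the aio-context switch onto QEMU's generic transaction infrastructure: each participant registers a TransactionActionDrv with tran_add(), and the .commit/.clean callbacks only run once the whole graph change is known to succeed. A minimal sketch of that pattern in isolation (the set_flag names are invented; assumes the qemu/transactions.h API used in the hunk above):

#include "qemu/osdep.h"
#include "qemu/transactions.h"

typedef struct SetFlagState {
    bool *flag;
    bool new_value;
} SetFlagState;

static void set_flag_commit(void *opaque)
{
    SetFlagState *s = opaque;

    *s->flag = s->new_value;    /* applied only on tran_commit() */
}

static TransactionActionDrv set_flag_drv = {
    .commit = set_flag_commit,
    .clean = g_free,            /* state freed after commit or abort alike */
};

static void queue_set_flag(Transaction *tran, bool *flag, bool value)
{
    SetFlagState *s = g_new(SetFlagState, 1);

    *s = (SetFlagState) { .flag = flag, .new_value = value };
    tran_add(tran, &set_flag_drv, s);
}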
@ -9,6 +9,7 @@
|
||||
#ifndef I386_HOST_SIGNAL_H
|
||||
#define I386_HOST_SIGNAL_H
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/ucontext.h>
|
||||
#include <machine/trap.h>
|
||||
#include <vm/pmap.h>
|
||||
|
@ -9,6 +9,7 @@
|
||||
#ifndef X86_64_HOST_SIGNAL_H
|
||||
#define X86_64_HOST_SIGNAL_H
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/ucontext.h>
|
||||
#include <machine/trap.h>
|
||||
#include <vm/pmap.h>
|
||||
|
@ -663,7 +663,6 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
|
||||
page_dump(stdout);
|
||||
printf("\n");
|
||||
#endif
|
||||
tb_invalidate_phys_range(start, start + len);
|
||||
mmap_unlock();
|
||||
return start;
|
||||
fail:
|
||||
@ -769,7 +768,6 @@ int target_munmap(abi_ulong start, abi_ulong len)
|
||||
|
||||
if (ret == 0) {
|
||||
page_set_flags(start, start + len, 0);
|
||||
tb_invalidate_phys_range(start, start + len);
|
||||
}
|
||||
mmap_unlock();
|
||||
return ret;
|
||||
|
@ -23,7 +23,6 @@ CONFIG_APM=y
|
||||
CONFIG_I8257=y
|
||||
CONFIG_PIIX4=y
|
||||
CONFIG_IDE_ISA=y
|
||||
CONFIG_IDE_PIIX=y
|
||||
CONFIG_PFLASH_CFI01=y
|
||||
CONFIG_I8259=y
|
||||
CONFIG_MC146818RTC=y
|
||||
|
@ -1,7 +1,6 @@
|
||||
# Default configuration for mips64el-softmmu
|
||||
|
||||
include ../mips-softmmu/common.mak
|
||||
CONFIG_IDE_VIA=y
|
||||
CONFIG_FULOONG=y
|
||||
CONFIG_LOONGSON3V=y
|
||||
CONFIG_ATI_VGA=y
|
||||
|
25
configure
vendored
@ -1285,7 +1285,7 @@ if test "$stack_protector" != "no"; then
|
||||
cat > $TMPC << EOF
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
char arr[64], *p = arr, *c = argv[0];
|
||||
char arr[64], *p = arr, *c = argv[argc - 1];
|
||||
while (*c) {
|
||||
*p++ = *c++;
|
||||
}
|
||||
@ -1354,6 +1354,8 @@ static THREAD int tls_var;
|
||||
int main(void) { return tls_var; }
|
||||
EOF
|
||||
|
||||
# Meson currently only handles pie as a boolean for now so if we have
|
||||
# explicitly disabled PIE we need to extend our cflags because it wont.
|
||||
if test "$static" = "yes"; then
|
||||
if test "$pie" != "no" && compile_prog "-Werror -fPIE -DPIE" "-static-pie"; then
|
||||
CONFIGURE_CFLAGS="-fPIE -DPIE $CONFIGURE_CFLAGS"
|
||||
@ -1362,13 +1364,12 @@ if test "$static" = "yes"; then
|
||||
error_exit "-static-pie not available due to missing toolchain support"
|
||||
else
|
||||
pie="no"
|
||||
QEMU_CFLAGS="-fno-pie -no-pie $QEMU_CFLAGS"
|
||||
fi
|
||||
elif test "$pie" = "no"; then
|
||||
if compile_prog "-Werror -fno-pie" "-no-pie"; then
|
||||
CONFIGURE_CFLAGS="-fno-pie $CONFIGURE_CFLAGS"
|
||||
CONFIGURE_LDFLAGS="-no-pie $CONFIGURE_LDFLAGS"
|
||||
# Meson currently only handles pie as a boolean for now so if we have
|
||||
# explicitly disabled PIE we need to extend our cflags because it wont.
|
||||
QEMU_CFLAGS="-fno-pie -no-pie $QEMU_CFLAGS"
|
||||
fi
|
||||
elif compile_prog "-Werror -fPIE -DPIE" "-pie"; then
|
||||
@ -1633,7 +1634,7 @@ fi
|
||||
|
||||
if test "$safe_stack" = "yes"; then
|
||||
cat > $TMPC << EOF
|
||||
int main(int argc, char *argv[])
|
||||
int main(void)
|
||||
{
|
||||
#if ! __has_feature(safe_stack)
|
||||
#error SafeStack Disabled
|
||||
@ -1655,7 +1656,7 @@ EOF
|
||||
fi
|
||||
else
|
||||
cat > $TMPC << EOF
|
||||
int main(int argc, char *argv[])
|
||||
int main(void)
|
||||
{
|
||||
#if defined(__has_feature)
|
||||
#if __has_feature(safe_stack)
|
||||
@ -1701,7 +1702,7 @@ static const int Z = 1;
|
||||
#define TAUT(X) ((X) == Z)
|
||||
#define PAREN(X, Y) (X == Y)
|
||||
#define ID(X) (X)
|
||||
int main(int argc, char *argv[])
|
||||
int main(void)
|
||||
{
|
||||
int x = 0, y = 0;
|
||||
x = ID(x);
|
||||
@ -1904,6 +1905,15 @@ probe_target_compiler() {
|
||||
container_cross_ranlib=
|
||||
container_cross_strip=
|
||||
|
||||
# We shall skip configuring the target compiler if the user didn't
|
||||
# bother enabling an appropriate guest. This avoids building
|
||||
# extraneous firmware images and tests.
|
||||
if test "${target_list#*$1}" != "$1"; then
|
||||
break;
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
|
||||
target_arch=${1%%-*}
|
||||
case $target_arch in
|
||||
aarch64) container_hosts="x86_64 aarch64" ;;
|
||||
@ -2511,6 +2521,9 @@ echo "HOST_CC=$host_cc" >> $config_host_mak
|
||||
if test -n "$gdb_bin"; then
|
||||
echo "HAVE_GDB_BIN=$gdb_bin" >> $config_host_mak
|
||||
fi
|
||||
if test "$plugins" = "yes" ; then
|
||||
echo "CONFIG_PLUGIN=y" >> $config_host_mak
|
||||
fi
|
||||
|
||||
tcg_tests_targets=
|
||||
for target in $target_list; do
|
||||
|
@ -125,6 +125,7 @@ static KDDEBUGGER_DATA64 *get_kdbg(uint64_t KernBase, struct pdb_reader *pdb,
|
||||
|
||||
if (va_space_rw(vs, KdDebuggerDataBlock, kdbg, kdbg_hdr.Size, 0)) {
|
||||
eprintf("Failed to extract entire KDBG\n");
|
||||
free(kdbg);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -29,6 +29,7 @@ SONAMES := $(addsuffix .so,$(addprefix lib,$(NAMES)))
|
||||
CFLAGS = $(GLIB_CFLAGS)
|
||||
CFLAGS += -fPIC -Wall $(filter -W%, $(QEMU_CFLAGS))
|
||||
CFLAGS += $(if $(findstring no-psabi,$(QEMU_CFLAGS)),-Wpsabi)
|
||||
CFLAGS += $(if $(CONFIG_DEBUG_TCG), -ggdb -O0)
|
||||
CFLAGS += -I$(SRC_PATH)/include/qemu
|
||||
|
||||
all: $(SONAMES)
|
||||
|
@ -18,11 +18,30 @@
|
||||
QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
|
||||
|
||||
/* Store last executed instruction on each vCPU as a GString */
|
||||
GArray *last_exec;
|
||||
static GPtrArray *last_exec;
|
||||
static GMutex expand_array_lock;
|
||||
|
||||
static GPtrArray *imatches;
|
||||
static GArray *amatches;
|
||||
|
||||
/*
|
||||
* Expand last_exec array.
|
||||
*
|
||||
* As we could have multiple threads trying to do this we need to
|
||||
* serialise the expansion under a lock. Threads accessing already
|
||||
* created entries can continue without issue even if the ptr array
|
||||
* gets reallocated during resize.
|
||||
*/
|
||||
static void expand_last_exec(int cpu_index)
|
||||
{
|
||||
g_mutex_lock(&expand_array_lock);
|
||||
while (cpu_index >= last_exec->len) {
|
||||
GString *s = g_string_new(NULL);
|
||||
g_ptr_array_add(last_exec, s);
|
||||
}
|
||||
g_mutex_unlock(&expand_array_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add memory read or write information to current instruction log
|
||||
*/
|
||||
@ -33,7 +52,7 @@ static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t info,
|
||||
|
||||
/* Find vCPU in array */
|
||||
g_assert(cpu_index < last_exec->len);
|
||||
s = g_array_index(last_exec, GString *, cpu_index);
|
||||
s = g_ptr_array_index(last_exec, cpu_index);
|
||||
|
||||
/* Indicate type of memory access */
|
||||
if (qemu_plugin_mem_is_store(info)) {
|
||||
@ -61,11 +80,10 @@ static void vcpu_insn_exec(unsigned int cpu_index, void *udata)
|
||||
GString *s;
|
||||
|
||||
/* Find or create vCPU in array */
|
||||
while (cpu_index >= last_exec->len) {
|
||||
s = g_string_new(NULL);
|
||||
g_array_append_val(last_exec, s);
|
||||
if (cpu_index >= last_exec->len) {
|
||||
expand_last_exec(cpu_index);
|
||||
}
|
||||
s = g_array_index(last_exec, GString *, cpu_index);
|
||||
s = g_ptr_array_index(last_exec, cpu_index);
|
||||
|
||||
/* Print previous instruction in cache */
|
||||
if (s->len) {
|
||||
@ -163,7 +181,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
|
||||
guint i;
|
||||
GString *s;
|
||||
for (i = 0; i < last_exec->len; i++) {
|
||||
s = g_array_index(last_exec, GString *, i);
|
||||
s = g_ptr_array_index(last_exec, i);
|
||||
if (s->str) {
|
||||
qemu_plugin_outs(s->str);
|
||||
qemu_plugin_outs("\n");
|
||||
@ -201,7 +219,11 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
|
||||
* Initialize dynamic array to cache vCPU instruction. In user mode
|
||||
* we don't know the size before emulation.
|
||||
*/
|
||||
last_exec = g_array_new(FALSE, FALSE, sizeof(GString *));
|
||||
if (info->system_emulation) {
|
||||
last_exec = g_ptr_array_sized_new(info->system.max_vcpus);
|
||||
} else {
|
||||
last_exec = g_ptr_array_new();
|
||||
}
|
||||
|
||||
for (int i = 0; i < argc; i++) {
|
||||
char *opt = argv[i];
|
||||
|
14
cpu.c
@ -409,15 +409,23 @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp)
|
||||
/* cache the cpu class for the hotpath */
|
||||
cpu->cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
cpu_list_add(cpu);
|
||||
if (!accel_cpu_realizefn(cpu, errp)) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* NB: errp parameter is unused currently */
|
||||
if (tcg_enabled()) {
|
||||
tcg_exec_realizefn(cpu, errp);
|
||||
}
|
||||
|
||||
/* Wait until cpu initialization complete before exposing cpu. */
|
||||
cpu_list_add(cpu);
|
||||
|
||||
/* Plugin initialization must wait until cpu_index assigned. */
|
||||
if (tcg_enabled()) {
|
||||
qemu_plugin_vcpu_init_hook(cpu);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
|
||||
qdev_get_vmsd(DEVICE(cpu))->unmigratable);
|
||||
@ -552,7 +560,7 @@ void list_cpus(const char *optarg)
|
||||
void tb_invalidate_phys_addr(target_ulong addr)
|
||||
{
|
||||
mmap_lock();
|
||||
tb_invalidate_phys_page_range(addr, addr + 1);
|
||||
tb_invalidate_phys_page(addr);
|
||||
mmap_unlock();
|
||||
}
|
||||
|
||||
@ -582,7 +590,7 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
|
||||
return;
|
||||
}
|
||||
ram_addr = memory_region_get_ram_addr(mr) + addr;
|
||||
tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
|
||||
tb_invalidate_phys_page(ram_addr);
|
||||
}
|
||||
|
||||
//// --- Begin LibAFL code ---
|
||||
|
@ -22,6 +22,8 @@
#include "qemu/osdep.h"
#include "crypto/akcipher.h"
#include "akcipherpriv.h"
#include "der.h"
#include "rsakey.h"

#if defined(CONFIG_GCRYPT)
#include "akcipher-gcrypt.c.inc"
@ -106,3 +108,19 @@ void qcrypto_akcipher_free(QCryptoAkCipher *akcipher)

    drv->free(akcipher);
}

int qcrypto_akcipher_export_p8info(const QCryptoAkCipherOptions *opts,
                                   uint8_t *key, size_t keylen,
                                   uint8_t **dst, size_t *dst_len,
                                   Error **errp)
{
    switch (opts->alg) {
    case QCRYPTO_AKCIPHER_ALG_RSA:
        qcrypto_akcipher_rsakey_export_p8info(key, keylen, dst, dst_len);
        return 0;

    default:
        error_setg(errp, "Unsupported algorithm: %u", opts->alg);
        return -1;
    }
}
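A hypothetical caller of the new export helper above, wrapping a raw PKCS#1 RSA key into PKCS#8 PrivateKeyInfo (the wrapper function and variable names are illustrative only):

static int wrap_rsa_key_as_p8info(uint8_t *pkcs1_key, size_t pkcs1_len,
                                  uint8_t **p8info, size_t *p8info_len,
                                  Error **errp)
{
    QCryptoAkCipherOptions opts = {
        .alg = QCRYPTO_AKCIPHER_ALG_RSA,
    };

    /* On success *p8info is a freshly allocated DER buffer of *p8info_len */
    return qcrypto_akcipher_export_p8info(&opts, pkcs1_key, pkcs1_len,
                                          p8info, p8info_len, errp);
}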
143
crypto/block-luks-priv.h
Normal file
@ -0,0 +1,143 @@
|
||||
/*
|
||||
* QEMU Crypto block device encryption LUKS format
|
||||
*
|
||||
* Copyright (c) 2015-2016 Red Hat, Inc.
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/bswap.h"
|
||||
|
||||
#include "block-luks.h"
|
||||
|
||||
#include "crypto/hash.h"
|
||||
#include "crypto/afsplit.h"
|
||||
#include "crypto/pbkdf.h"
|
||||
#include "crypto/secret.h"
|
||||
#include "crypto/random.h"
|
||||
#include "qemu/uuid.h"
|
||||
|
||||
#include "qemu/coroutine.h"
|
||||
#include "qemu/bitmap.h"
|
||||
|
||||
/*
|
||||
* Reference for the LUKS format implemented here is
|
||||
*
|
||||
* docs/on-disk-format.pdf
|
||||
*
|
||||
* in 'cryptsetup' package source code
|
||||
*
|
||||
* This file implements the 1.2.1 specification, dated
|
||||
* Oct 16, 2011.
|
||||
*/
|
||||
|
||||
typedef struct QCryptoBlockLUKSHeader QCryptoBlockLUKSHeader;
|
||||
typedef struct QCryptoBlockLUKSKeySlot QCryptoBlockLUKSKeySlot;
|
||||
|
||||
|
||||
/* The following constants are all defined by the LUKS spec */
|
||||
#define QCRYPTO_BLOCK_LUKS_VERSION 1
|
||||
|
||||
#define QCRYPTO_BLOCK_LUKS_MAGIC_LEN 6
|
||||
#define QCRYPTO_BLOCK_LUKS_CIPHER_NAME_LEN 32
|
||||
#define QCRYPTO_BLOCK_LUKS_CIPHER_MODE_LEN 32
|
||||
#define QCRYPTO_BLOCK_LUKS_HASH_SPEC_LEN 32
|
||||
#define QCRYPTO_BLOCK_LUKS_DIGEST_LEN 20
|
||||
#define QCRYPTO_BLOCK_LUKS_SALT_LEN 32
|
||||
#define QCRYPTO_BLOCK_LUKS_UUID_LEN 40
|
||||
#define QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS 8
|
||||
#define QCRYPTO_BLOCK_LUKS_STRIPES 4000
|
||||
#define QCRYPTO_BLOCK_LUKS_MIN_SLOT_KEY_ITERS 1000
|
||||
#define QCRYPTO_BLOCK_LUKS_MIN_MASTER_KEY_ITERS 1000
|
||||
#define QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET 4096
|
||||
|
||||
#define QCRYPTO_BLOCK_LUKS_KEY_SLOT_DISABLED 0x0000DEAD
|
||||
#define QCRYPTO_BLOCK_LUKS_KEY_SLOT_ENABLED 0x00AC71F3
|
||||
|
||||
#define QCRYPTO_BLOCK_LUKS_SECTOR_SIZE 512LL
|
||||
|
||||
#define QCRYPTO_BLOCK_LUKS_DEFAULT_ITER_TIME_MS 2000
|
||||
#define QCRYPTO_BLOCK_LUKS_ERASE_ITERATIONS 40
|
||||
|
||||
static const char qcrypto_block_luks_magic[QCRYPTO_BLOCK_LUKS_MAGIC_LEN] = {
|
||||
'L', 'U', 'K', 'S', 0xBA, 0xBE
|
||||
};
|
||||
|
||||
/*
|
||||
* This struct is written to disk in big-endian format,
|
||||
* but operated upon in native-endian format.
|
||||
*/
|
||||
struct QCryptoBlockLUKSKeySlot {
|
||||
/* state of keyslot, enabled/disable */
|
||||
uint32_t active;
|
||||
/* iterations for PBKDF2 */
|
||||
uint32_t iterations;
|
||||
/* salt for PBKDF2 */
|
||||
uint8_t salt[QCRYPTO_BLOCK_LUKS_SALT_LEN];
|
||||
/* start sector of key material */
|
||||
uint32_t key_offset_sector;
|
||||
/* number of anti-forensic stripes */
|
||||
uint32_t stripes;
|
||||
};
|
||||
|
||||
/*
|
||||
* This struct is written to disk in big-endian format,
|
||||
* but operated upon in native-endian format.
|
||||
*/
|
||||
struct QCryptoBlockLUKSHeader {
|
||||
/* 'L', 'U', 'K', 'S', '0xBA', '0xBE' */
|
||||
char magic[QCRYPTO_BLOCK_LUKS_MAGIC_LEN];
|
||||
|
||||
/* LUKS version, currently 1 */
|
||||
uint16_t version;
|
||||
|
||||
/* cipher name specification (aes, etc) */
|
||||
char cipher_name[QCRYPTO_BLOCK_LUKS_CIPHER_NAME_LEN];
|
||||
|
||||
/* cipher mode specification (cbc-plain, xts-essiv:sha256, etc) */
|
||||
char cipher_mode[QCRYPTO_BLOCK_LUKS_CIPHER_MODE_LEN];
|
||||
|
||||
/* hash specification (sha256, etc) */
|
||||
char hash_spec[QCRYPTO_BLOCK_LUKS_HASH_SPEC_LEN];
|
||||
|
||||
/* start offset of the volume data (in 512 byte sectors) */
|
||||
uint32_t payload_offset_sector;
|
||||
|
||||
/* Number of key bytes */
|
||||
uint32_t master_key_len;
|
||||
|
||||
/* master key checksum after PBKDF2 */
|
||||
uint8_t master_key_digest[QCRYPTO_BLOCK_LUKS_DIGEST_LEN];
|
||||
|
||||
/* salt for master key PBKDF2 */
|
||||
uint8_t master_key_salt[QCRYPTO_BLOCK_LUKS_SALT_LEN];
|
||||
|
||||
/* iterations for master key PBKDF2 */
|
||||
uint32_t master_key_iterations;
|
||||
|
||||
/* UUID of the partition in standard ASCII representation */
|
||||
uint8_t uuid[QCRYPTO_BLOCK_LUKS_UUID_LEN];
|
||||
|
||||
/* key slots */
|
||||
QCryptoBlockLUKSKeySlot key_slots[QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS];
|
||||
};
|
||||
|
||||
|
||||
void
qcrypto_block_luks_to_disk_endian(QCryptoBlockLUKSHeader *hdr);
void
qcrypto_block_luks_from_disk_endian(QCryptoBlockLUKSHeader *hdr);
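A sketch (not part of the commit) of how a consumer of this new private header might sanity-check a header read from disk, using only the declarations above:

static bool luks_header_looks_valid(QCryptoBlockLUKSHeader *hdr)
{
    /* The on-disk header is big-endian; convert before reading fields */
    qcrypto_block_luks_from_disk_endian(hdr);

    if (memcmp(hdr->magic, qcrypto_block_luks_magic,
               QCRYPTO_BLOCK_LUKS_MAGIC_LEN) != 0) {
        return false;
    }
    return hdr->version == QCRYPTO_BLOCK_LUKS_VERSION;
}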
@ -23,6 +23,7 @@
|
||||
#include "qemu/bswap.h"
|
||||
|
||||
#include "block-luks.h"
|
||||
#include "block-luks-priv.h"
|
||||
|
||||
#include "crypto/hash.h"
|
||||
#include "crypto/afsplit.h"
|
||||
@ -46,37 +47,6 @@
|
||||
*/
|
||||
|
||||
typedef struct QCryptoBlockLUKS QCryptoBlockLUKS;
|
||||
typedef struct QCryptoBlockLUKSHeader QCryptoBlockLUKSHeader;
|
||||
typedef struct QCryptoBlockLUKSKeySlot QCryptoBlockLUKSKeySlot;
|
||||
|
||||
|
||||
/* The following constants are all defined by the LUKS spec */
|
||||
#define QCRYPTO_BLOCK_LUKS_VERSION 1
|
||||
|
||||
#define QCRYPTO_BLOCK_LUKS_MAGIC_LEN 6
|
||||
#define QCRYPTO_BLOCK_LUKS_CIPHER_NAME_LEN 32
|
||||
#define QCRYPTO_BLOCK_LUKS_CIPHER_MODE_LEN 32
|
||||
#define QCRYPTO_BLOCK_LUKS_HASH_SPEC_LEN 32
|
||||
#define QCRYPTO_BLOCK_LUKS_DIGEST_LEN 20
|
||||
#define QCRYPTO_BLOCK_LUKS_SALT_LEN 32
|
||||
#define QCRYPTO_BLOCK_LUKS_UUID_LEN 40
|
||||
#define QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS 8
|
||||
#define QCRYPTO_BLOCK_LUKS_STRIPES 4000
|
||||
#define QCRYPTO_BLOCK_LUKS_MIN_SLOT_KEY_ITERS 1000
|
||||
#define QCRYPTO_BLOCK_LUKS_MIN_MASTER_KEY_ITERS 1000
|
||||
#define QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET 4096
|
||||
|
||||
#define QCRYPTO_BLOCK_LUKS_KEY_SLOT_DISABLED 0x0000DEAD
|
||||
#define QCRYPTO_BLOCK_LUKS_KEY_SLOT_ENABLED 0x00AC71F3
|
||||
|
||||
#define QCRYPTO_BLOCK_LUKS_SECTOR_SIZE 512LL
|
||||
|
||||
#define QCRYPTO_BLOCK_LUKS_DEFAULT_ITER_TIME_MS 2000
|
||||
#define QCRYPTO_BLOCK_LUKS_ERASE_ITERATIONS 40
|
||||
|
||||
static const char qcrypto_block_luks_magic[QCRYPTO_BLOCK_LUKS_MAGIC_LEN] = {
|
||||
'L', 'U', 'K', 'S', 0xBA, 0xBE
|
||||
};
|
||||
|
||||
typedef struct QCryptoBlockLUKSNameMap QCryptoBlockLUKSNameMap;
|
||||
struct QCryptoBlockLUKSNameMap {
|
||||
@ -134,69 +104,7 @@ qcrypto_block_luks_cipher_name_map[] = {
|
||||
{ "twofish", qcrypto_block_luks_cipher_size_map_twofish },
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* This struct is written to disk in big-endian format,
|
||||
* but operated upon in native-endian format.
|
||||
*/
|
||||
struct QCryptoBlockLUKSKeySlot {
|
||||
/* state of keyslot, enabled/disable */
|
||||
uint32_t active;
|
||||
/* iterations for PBKDF2 */
|
||||
uint32_t iterations;
|
||||
/* salt for PBKDF2 */
|
||||
uint8_t salt[QCRYPTO_BLOCK_LUKS_SALT_LEN];
|
||||
/* start sector of key material */
|
||||
uint32_t key_offset_sector;
|
||||
/* number of anti-forensic stripes */
|
||||
uint32_t stripes;
|
||||
};
|
||||
|
||||
QEMU_BUILD_BUG_ON(sizeof(struct QCryptoBlockLUKSKeySlot) != 48);
|
||||
|
||||
|
||||
/*
|
||||
* This struct is written to disk in big-endian format,
|
||||
* but operated upon in native-endian format.
|
||||
*/
|
||||
struct QCryptoBlockLUKSHeader {
|
||||
/* 'L', 'U', 'K', 'S', '0xBA', '0xBE' */
|
||||
char magic[QCRYPTO_BLOCK_LUKS_MAGIC_LEN];
|
||||
|
||||
/* LUKS version, currently 1 */
|
||||
uint16_t version;
|
||||
|
||||
/* cipher name specification (aes, etc) */
|
||||
char cipher_name[QCRYPTO_BLOCK_LUKS_CIPHER_NAME_LEN];
|
||||
|
||||
/* cipher mode specification (cbc-plain, xts-essiv:sha256, etc) */
|
||||
char cipher_mode[QCRYPTO_BLOCK_LUKS_CIPHER_MODE_LEN];
|
||||
|
||||
/* hash specification (sha256, etc) */
|
||||
char hash_spec[QCRYPTO_BLOCK_LUKS_HASH_SPEC_LEN];
|
||||
|
||||
/* start offset of the volume data (in 512 byte sectors) */
|
||||
uint32_t payload_offset_sector;
|
||||
|
||||
/* Number of key bytes */
|
||||
uint32_t master_key_len;
|
||||
|
||||
/* master key checksum after PBKDF2 */
|
||||
uint8_t master_key_digest[QCRYPTO_BLOCK_LUKS_DIGEST_LEN];
|
||||
|
||||
/* salt for master key PBKDF2 */
|
||||
uint8_t master_key_salt[QCRYPTO_BLOCK_LUKS_SALT_LEN];
|
||||
|
||||
/* iterations for master key PBKDF2 */
|
||||
uint32_t master_key_iterations;
|
||||
|
||||
/* UUID of the partition in standard ASCII representation */
|
||||
uint8_t uuid[QCRYPTO_BLOCK_LUKS_UUID_LEN];
|
||||
|
||||
/* key slots */
|
||||
QCryptoBlockLUKSKeySlot key_slots[QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS];
|
||||
};
|
||||
|
||||
QEMU_BUILD_BUG_ON(sizeof(struct QCryptoBlockLUKSHeader) != 592);
|
||||
|
||||
|
||||
@ -254,7 +162,7 @@ static int qcrypto_block_luks_cipher_name_lookup(const char *name,
|
||||
}
|
||||
}
|
||||
|
||||
error_setg(errp, "Algorithm %s with key size %d bytes not supported",
|
||||
error_setg(errp, "Algorithm '%s' with key size %d bytes not supported",
|
||||
name, key_bytes);
|
||||
return 0;
|
||||
}
|
||||
@ -290,7 +198,7 @@ static int qcrypto_block_luks_name_lookup(const char *name,
|
||||
int ret = qapi_enum_parse(map, name, -1, NULL);
|
||||
|
||||
if (ret < 0) {
|
||||
error_setg(errp, "%s %s not supported", type, name);
|
||||
error_setg(errp, "%s '%s' not supported", type, name);
|
||||
return 0;
|
||||
}
|
||||
return ret;
|
||||
@ -440,6 +348,51 @@ qcrypto_block_luks_splitkeylen_sectors(const QCryptoBlockLUKS *luks,
|
||||
return ROUND_UP(splitkeylen_sectors, header_sectors);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
qcrypto_block_luks_to_disk_endian(QCryptoBlockLUKSHeader *hdr)
|
||||
{
|
||||
size_t i;
|
||||
|
||||
/*
|
||||
* Everything on disk uses Big Endian (tm), so flip header fields
|
||||
* before writing them
|
||||
*/
|
||||
cpu_to_be16s(&hdr->version);
|
||||
cpu_to_be32s(&hdr->payload_offset_sector);
|
||||
cpu_to_be32s(&hdr->master_key_len);
|
||||
cpu_to_be32s(&hdr->master_key_iterations);
|
||||
|
||||
for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) {
|
||||
cpu_to_be32s(&hdr->key_slots[i].active);
|
||||
cpu_to_be32s(&hdr->key_slots[i].iterations);
|
||||
cpu_to_be32s(&hdr->key_slots[i].key_offset_sector);
|
||||
cpu_to_be32s(&hdr->key_slots[i].stripes);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
qcrypto_block_luks_from_disk_endian(QCryptoBlockLUKSHeader *hdr)
|
||||
{
|
||||
size_t i;
|
||||
|
||||
/*
|
||||
* The header is always stored in big-endian format, so
|
||||
* convert everything to native
|
||||
*/
|
||||
be16_to_cpus(&hdr->version);
|
||||
be32_to_cpus(&hdr->payload_offset_sector);
|
||||
be32_to_cpus(&hdr->master_key_len);
|
||||
be32_to_cpus(&hdr->master_key_iterations);
|
||||
|
||||
for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) {
|
||||
be32_to_cpus(&hdr->key_slots[i].active);
|
||||
be32_to_cpus(&hdr->key_slots[i].iterations);
|
||||
be32_to_cpus(&hdr->key_slots[i].key_offset_sector);
|
||||
be32_to_cpus(&hdr->key_slots[i].stripes);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Stores the main LUKS header, taking care of endianess
|
||||
*/
|
||||
@ -451,28 +404,13 @@ qcrypto_block_luks_store_header(QCryptoBlock *block,
|
||||
{
|
||||
const QCryptoBlockLUKS *luks = block->opaque;
|
||||
Error *local_err = NULL;
|
||||
size_t i;
|
||||
g_autofree QCryptoBlockLUKSHeader *hdr_copy = NULL;
|
||||
|
||||
/* Create a copy of the header */
|
||||
hdr_copy = g_new0(QCryptoBlockLUKSHeader, 1);
|
||||
memcpy(hdr_copy, &luks->header, sizeof(QCryptoBlockLUKSHeader));
|
||||
|
||||
/*
|
||||
* Everything on disk uses Big Endian (tm), so flip header fields
|
||||
* before writing them
|
||||
*/
|
||||
cpu_to_be16s(&hdr_copy->version);
|
||||
cpu_to_be32s(&hdr_copy->payload_offset_sector);
|
||||
cpu_to_be32s(&hdr_copy->master_key_len);
|
||||
cpu_to_be32s(&hdr_copy->master_key_iterations);
|
||||
|
||||
for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) {
|
||||
cpu_to_be32s(&hdr_copy->key_slots[i].active);
|
||||
cpu_to_be32s(&hdr_copy->key_slots[i].iterations);
|
||||
cpu_to_be32s(&hdr_copy->key_slots[i].key_offset_sector);
|
||||
cpu_to_be32s(&hdr_copy->key_slots[i].stripes);
|
||||
}
|
||||
qcrypto_block_luks_to_disk_endian(hdr_copy);
|
||||
|
||||
/* Write out the partition header and key slot headers */
|
||||
writefunc(block, 0, (const uint8_t *)hdr_copy, sizeof(*hdr_copy),
|
||||
@ -496,7 +434,6 @@ qcrypto_block_luks_load_header(QCryptoBlock *block,
|
||||
Error **errp)
|
||||
{
|
||||
int rv;
|
||||
size_t i;
|
||||
QCryptoBlockLUKS *luks = block->opaque;
|
||||
|
||||
/*
|
||||
@ -512,21 +449,7 @@ qcrypto_block_luks_load_header(QCryptoBlock *block,
|
||||
return rv;
|
||||
}
|
||||
|
||||
/*
|
||||
* The header is always stored in big-endian format, so
|
||||
* convert everything to native
|
||||
*/
|
||||
be16_to_cpus(&luks->header.version);
|
||||
be32_to_cpus(&luks->header.payload_offset_sector);
|
||||
be32_to_cpus(&luks->header.master_key_len);
|
||||
be32_to_cpus(&luks->header.master_key_iterations);
|
||||
|
||||
for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) {
|
||||
be32_to_cpus(&luks->header.key_slots[i].active);
|
||||
be32_to_cpus(&luks->header.key_slots[i].iterations);
|
||||
be32_to_cpus(&luks->header.key_slots[i].key_offset_sector);
|
||||
be32_to_cpus(&luks->header.key_slots[i].stripes);
|
||||
}
|
||||
qcrypto_block_luks_from_disk_endian(&luks->header);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -554,6 +477,36 @@ qcrypto_block_luks_check_header(const QCryptoBlockLUKS *luks, Error **errp)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!memchr(luks->header.cipher_name, '\0',
|
||||
sizeof(luks->header.cipher_name))) {
|
||||
error_setg(errp, "LUKS header cipher name is not NUL terminated");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!memchr(luks->header.cipher_mode, '\0',
|
||||
sizeof(luks->header.cipher_mode))) {
|
||||
error_setg(errp, "LUKS header cipher mode is not NUL terminated");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!memchr(luks->header.hash_spec, '\0',
|
||||
sizeof(luks->header.hash_spec))) {
|
||||
error_setg(errp, "LUKS header hash spec is not NUL terminated");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (luks->header.payload_offset_sector <
|
||||
DIV_ROUND_UP(QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET,
|
||||
QCRYPTO_BLOCK_LUKS_SECTOR_SIZE)) {
|
||||
error_setg(errp, "LUKS payload is overlapping with the header");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (luks->header.master_key_iterations == 0) {
|
||||
error_setg(errp, "LUKS key iteration count is zero");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Check all keyslots for corruption */
|
||||
for (i = 0 ; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS ; i++) {
|
||||
|
||||
@ -564,8 +517,9 @@ qcrypto_block_luks_check_header(const QCryptoBlockLUKS *luks, Error **errp)
|
||||
header_sectors,
|
||||
slot1->stripes);
|
||||
|
||||
if (slot1->stripes == 0) {
|
||||
error_setg(errp, "Keyslot %zu is corrupted (stripes == 0)", i);
|
||||
if (slot1->stripes != QCRYPTO_BLOCK_LUKS_STRIPES) {
|
||||
error_setg(errp, "Keyslot %zu is corrupted (stripes %d != %d)",
|
||||
i, slot1->stripes, QCRYPTO_BLOCK_LUKS_STRIPES);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -576,6 +530,20 @@ qcrypto_block_luks_check_header(const QCryptoBlockLUKS *luks, Error **errp)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (slot1->active == QCRYPTO_BLOCK_LUKS_KEY_SLOT_ENABLED &&
|
||||
slot1->iterations == 0) {
|
||||
error_setg(errp, "Keyslot %zu iteration count is zero", i);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (start1 < DIV_ROUND_UP(QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET,
|
||||
QCRYPTO_BLOCK_LUKS_SECTOR_SIZE)) {
|
||||
error_setg(errp,
|
||||
"Keyslot %zu is overlapping with the LUKS header",
|
||||
i);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (start1 + len1 > luks->header.payload_offset_sector) {
|
||||
error_setg(errp,
|
||||
"Keyslot %zu is overlapping with the encrypted payload",
|
||||
@ -624,7 +592,7 @@ qcrypto_block_luks_parse_header(QCryptoBlockLUKS *luks, Error **errp)
|
||||
*/
|
||||
ivgen_name = strchr(cipher_mode, '-');
|
||||
if (!ivgen_name) {
|
||||
error_setg(errp, "Unexpected cipher mode string format %s",
|
||||
error_setg(errp, "Unexpected cipher mode string format '%s'",
|
||||
luks->header.cipher_mode);
|
||||
return -1;
|
||||
}
|
||||
|
313
crypto/der.c
@ -22,20 +22,93 @@
|
||||
#include "qemu/osdep.h"
|
||||
#include "crypto/der.h"
|
||||
|
||||
typedef struct QCryptoDerEncodeNode {
|
||||
uint8_t tag;
|
||||
struct QCryptoDerEncodeNode *parent;
|
||||
struct QCryptoDerEncodeNode *next;
|
||||
/* for constructed type, data is null */
|
||||
const uint8_t *data;
|
||||
size_t dlen;
|
||||
} QCryptoDerEncodeNode;
|
||||
|
||||
typedef struct QCryptoEncodeContext {
|
||||
QCryptoDerEncodeNode root;
|
||||
QCryptoDerEncodeNode *current_parent;
|
||||
QCryptoDerEncodeNode *tail;
|
||||
} QCryptoEncodeContext;
|
||||
|
||||
enum QCryptoDERTypeTag {
|
||||
QCRYPTO_DER_TYPE_TAG_BOOL = 0x1,
|
||||
QCRYPTO_DER_TYPE_TAG_INT = 0x2,
|
||||
QCRYPTO_DER_TYPE_TAG_BIT_STR = 0x3,
|
||||
QCRYPTO_DER_TYPE_TAG_OCT_STR = 0x4,
|
||||
QCRYPTO_DER_TYPE_TAG_OCT_NULL = 0x5,
|
||||
QCRYPTO_DER_TYPE_TAG_OCT_OID = 0x6,
|
||||
QCRYPTO_DER_TYPE_TAG_NULL = 0x5,
|
||||
QCRYPTO_DER_TYPE_TAG_OID = 0x6,
|
||||
QCRYPTO_DER_TYPE_TAG_SEQ = 0x10,
|
||||
QCRYPTO_DER_TYPE_TAG_SET = 0x11,
|
||||
};
|
||||
|
||||
#define QCRYPTO_DER_CONSTRUCTED_MASK 0x20
|
||||
enum QCryptoDERTagClass {
|
||||
QCRYPTO_DER_TAG_CLASS_UNIV = 0x0,
|
||||
QCRYPTO_DER_TAG_CLASS_APPL = 0x1,
|
||||
QCRYPTO_DER_TAG_CLASS_CONT = 0x2,
|
||||
QCRYPTO_DER_TAG_CLASS_PRIV = 0x3,
|
||||
};
|
||||
|
||||
enum QCryptoDERTagEnc {
|
||||
QCRYPTO_DER_TAG_ENC_PRIM = 0x0,
|
||||
QCRYPTO_DER_TAG_ENC_CONS = 0x1,
|
||||
};
|
||||
|
||||
#define QCRYPTO_DER_TAG_ENC_MASK 0x20
#define QCRYPTO_DER_TAG_ENC_SHIFT 5

#define QCRYPTO_DER_TAG_CLASS_MASK 0xc0
#define QCRYPTO_DER_TAG_CLASS_SHIFT 6

#define QCRYPTO_DER_TAG_VAL_MASK 0x1f
#define QCRYPTO_DER_SHORT_LEN_MASK 0x80

#define QCRYPTO_DER_TAG(class, enc, val) \
    (((class) << QCRYPTO_DER_TAG_CLASS_SHIFT) | \
     ((enc) << QCRYPTO_DER_TAG_ENC_SHIFT) | (val))
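The QCRYPTO_DER_TAG() macro packs the tag class into bits 7-6, primitive/constructed into bit 5 and the tag number into bits 4-0. A stand-alone worked example (a local copy of the bit layout, not QEMU code) reproducing the three identifier octets used most often below:

#include <assert.h>

/* Same layout as QCRYPTO_DER_TAG: class << 6 | enc << 5 | value */
#define DER_TAG(class, enc, val) (((class) << 6) | ((enc) << 5) | (val))

int main(void)
{
    assert(DER_TAG(0, 1, 0x10) == 0x30);   /* universal, constructed: SEQUENCE */
    assert(DER_TAG(0, 0, 0x02) == 0x02);   /* universal, primitive: INTEGER */
    assert(DER_TAG(2, 1, 0x00) == 0xa0);   /* context-specific, constructed: [0] */
    return 0;
}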
/**
 * qcrypto_der_encode_length:
 * @src_len: the length of source data
 * @dst: distination to save the encoded 'length', if dst is NULL, only compute
 * the expected buffer size in bytes.
 * @dst_len: output parameter, indicates how many bytes wrote.
 *
 * Encode the 'length' part of TLV tuple.
 */
static void qcrypto_der_encode_length(size_t src_len,
                                      uint8_t *dst, size_t *dst_len)
{
    size_t max_length = 0xFF;
    uint8_t length_bytes = 0, header_byte;

    if (src_len < QCRYPTO_DER_SHORT_LEN_MASK) {
        header_byte = src_len;
        *dst_len = 1;
    } else {
        for (length_bytes = 1; max_length < src_len; length_bytes++) {
            max_length = (max_length << 8) + max_length;
        }
        header_byte = length_bytes;
        header_byte |= QCRYPTO_DER_SHORT_LEN_MASK;
        *dst_len = length_bytes + 1;
    }
    if (!dst) {
        return;
    }
    *dst++ = header_byte;
    /* Bigendian length bytes */
    for (; length_bytes > 0; length_bytes--) {
        *dst++ = ((src_len >> (length_bytes - 1) * 8) & 0xFF);
    }
}
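For reference, a stand-alone re-implementation of the short/long length form that the function above emits (illustration only, not the QEMU code): lengths below 0x80 take a single byte, longer lengths get a 0x80|n prefix followed by n big-endian bytes, so 300 encodes as 82 01 2c.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static size_t der_encode_length(size_t len, uint8_t *out)
{
    size_t n = 0, tmp = len;

    if (len < 0x80) {                   /* short form */
        out[0] = len;
        return 1;
    }
    while (tmp) {                       /* count length bytes needed */
        n++;
        tmp >>= 8;
    }
    out[0] = 0x80 | n;                  /* long form prefix */
    for (size_t i = 0; i < n; i++) {
        out[1 + i] = (uint8_t)(len >> (8 * (n - 1 - i)));
    }
    return 1 + n;
}

int main(void)
{
    uint8_t buf[8];

    assert(der_encode_length(5, buf) == 1 && buf[0] == 0x05);
    assert(der_encode_length(300, buf) == 3 &&
           buf[0] == 0x82 && buf[1] == 0x01 && buf[2] == 0x2c);
    return 0;
}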
static uint8_t qcrypto_der_peek_byte(const uint8_t **data, size_t *dlen)
|
||||
{
|
||||
return **data;
|
||||
@ -150,40 +223,230 @@ static int qcrypto_der_extract_data(const uint8_t **data, size_t *dlen,
|
||||
return qcrypto_der_extract_definite_data(data, dlen, cb, ctx, errp);
|
||||
}
|
||||
|
||||
int qcrypto_der_decode_int(const uint8_t **data, size_t *dlen,
|
||||
QCryptoDERDecodeCb cb, void *ctx, Error **errp)
|
||||
static int qcrypto_der_decode_tlv(const uint8_t expected_tag,
|
||||
const uint8_t **data, size_t *dlen,
|
||||
QCryptoDERDecodeCb cb,
|
||||
void *ctx, Error **errp)
|
||||
{
|
||||
const uint8_t *saved_data = *data;
|
||||
size_t saved_dlen = *dlen;
|
||||
uint8_t tag;
|
||||
int data_length;
|
||||
|
||||
if (*dlen < 1) {
|
||||
error_setg(errp, "Need more data");
|
||||
return -1;
|
||||
}
|
||||
tag = qcrypto_der_cut_byte(data, dlen);
|
||||
|
||||
/* INTEGER must encoded in primitive-form */
|
||||
if (tag != QCRYPTO_DER_TYPE_TAG_INT) {
|
||||
error_setg(errp, "Invalid integer type tag: %u", tag);
|
||||
return -1;
|
||||
if (tag != expected_tag) {
|
||||
error_setg(errp, "Unexpected tag: expected: %u, actual: %u",
|
||||
expected_tag, tag);
|
||||
goto error;
|
||||
}
|
||||
|
||||
return qcrypto_der_extract_data(data, dlen, cb, ctx, errp);
|
||||
data_length = qcrypto_der_extract_data(data, dlen, cb, ctx, errp);
|
||||
if (data_length < 0) {
|
||||
goto error;
|
||||
}
|
||||
return data_length;
|
||||
|
||||
error:
|
||||
*data = saved_data;
|
||||
*dlen = saved_dlen;
|
||||
return -1;
|
||||
}
|
||||
|
||||
int qcrypto_der_decode_int(const uint8_t **data, size_t *dlen,
|
||||
QCryptoDERDecodeCb cb, void *ctx, Error **errp)
|
||||
{
|
||||
const uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
|
||||
QCRYPTO_DER_TAG_ENC_PRIM,
|
||||
QCRYPTO_DER_TYPE_TAG_INT);
|
||||
return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp);
|
||||
}
|
||||
|
||||
int qcrypto_der_decode_seq(const uint8_t **data, size_t *dlen,
|
||||
QCryptoDERDecodeCb cb, void *ctx, Error **errp)
|
||||
{
|
||||
uint8_t tag;
|
||||
if (*dlen < 1) {
|
||||
error_setg(errp, "Need more data");
|
||||
return -1;
|
||||
}
|
||||
tag = qcrypto_der_cut_byte(data, dlen);
|
||||
|
||||
/* SEQUENCE must use constructed form */
|
||||
if (tag != (QCRYPTO_DER_TYPE_TAG_SEQ | QCRYPTO_DER_CONSTRUCTED_MASK)) {
|
||||
error_setg(errp, "Invalid type sequence tag: %u", tag);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return qcrypto_der_extract_data(data, dlen, cb, ctx, errp);
|
||||
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
|
||||
QCRYPTO_DER_TAG_ENC_CONS,
|
||||
QCRYPTO_DER_TYPE_TAG_SEQ);
|
||||
return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp);
|
||||
}
|
||||
|
||||
int qcrypto_der_decode_octet_str(const uint8_t **data, size_t *dlen,
|
||||
QCryptoDERDecodeCb cb, void *ctx, Error **errp)
|
||||
{
|
||||
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
|
||||
QCRYPTO_DER_TAG_ENC_PRIM,
|
||||
QCRYPTO_DER_TYPE_TAG_OCT_STR);
|
||||
return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp);
|
||||
}
|
||||
|
||||
int qcrypto_der_decode_bit_str(const uint8_t **data, size_t *dlen,
|
||||
QCryptoDERDecodeCb cb, void *ctx, Error **errp)
|
||||
{
|
||||
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
|
||||
QCRYPTO_DER_TAG_ENC_PRIM,
|
||||
QCRYPTO_DER_TYPE_TAG_BIT_STR);
|
||||
return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp);
|
||||
}
|
||||
|
||||
int qcrypto_der_decode_oid(const uint8_t **data, size_t *dlen,
|
||||
QCryptoDERDecodeCb cb, void *ctx, Error **errp)
|
||||
{
|
||||
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
|
||||
QCRYPTO_DER_TAG_ENC_PRIM,
|
||||
QCRYPTO_DER_TYPE_TAG_OID);
|
||||
return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp);
|
||||
}
|
||||
|
||||
int qcrypto_der_decode_ctx_tag(const uint8_t **data, size_t *dlen, int tag_id,
|
||||
QCryptoDERDecodeCb cb, void *ctx, Error **errp)
|
||||
{
|
||||
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_CONT,
|
||||
QCRYPTO_DER_TAG_ENC_CONS,
|
||||
tag_id);
|
||||
return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp);
|
||||
}
|
||||
|
||||
static void qcrypto_der_encode_prim(QCryptoEncodeContext *ctx, uint8_t tag,
|
||||
const uint8_t *data, size_t dlen)
|
||||
{
|
||||
QCryptoDerEncodeNode *node = g_new0(QCryptoDerEncodeNode, 1);
|
||||
size_t nbytes_len;
|
||||
|
||||
node->tag = tag;
|
||||
node->data = data;
|
||||
node->dlen = dlen;
|
||||
node->parent = ctx->current_parent;
|
||||
|
||||
qcrypto_der_encode_length(dlen, NULL, &nbytes_len);
|
||||
/* 1 byte for Tag, nbyte_len for Length, and dlen for Value */
|
||||
node->parent->dlen += 1 + nbytes_len + dlen;
|
||||
|
||||
ctx->tail->next = node;
|
||||
ctx->tail = node;
|
||||
}
|
||||
|
||||
QCryptoEncodeContext *qcrypto_der_encode_ctx_new(void)
|
||||
{
|
||||
QCryptoEncodeContext *ctx = g_new0(QCryptoEncodeContext, 1);
|
||||
ctx->current_parent = &ctx->root;
|
||||
ctx->tail = &ctx->root;
|
||||
return ctx;
|
||||
}
|
||||
|
||||
static void qcrypto_der_encode_cons_begin(QCryptoEncodeContext *ctx,
|
||||
uint8_t tag)
|
||||
{
|
||||
QCryptoDerEncodeNode *node = g_new0(QCryptoDerEncodeNode, 1);
|
||||
|
||||
node->tag = tag;
|
||||
node->parent = ctx->current_parent;
|
||||
ctx->current_parent = node;
|
||||
ctx->tail->next = node;
|
||||
ctx->tail = node;
|
||||
}
|
||||
|
||||
static void qcrypto_der_encode_cons_end(QCryptoEncodeContext *ctx)
|
||||
{
|
||||
QCryptoDerEncodeNode *cons_node = ctx->current_parent;
|
||||
size_t nbytes_len;
|
||||
|
||||
qcrypto_der_encode_length(cons_node->dlen, NULL, &nbytes_len);
|
||||
/* 1 byte for Tag, nbyte_len for Length, and dlen for Value */
|
||||
cons_node->parent->dlen += 1 + nbytes_len + cons_node->dlen;
|
||||
ctx->current_parent = cons_node->parent;
|
||||
}
|
||||
|
||||
void qcrypto_der_encode_seq_begin(QCryptoEncodeContext *ctx)
|
||||
{
|
||||
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
|
||||
QCRYPTO_DER_TAG_ENC_CONS,
|
||||
QCRYPTO_DER_TYPE_TAG_SEQ);
|
||||
qcrypto_der_encode_cons_begin(ctx, tag);
|
||||
}
|
||||
|
||||
void qcrypto_der_encode_seq_end(QCryptoEncodeContext *ctx)
|
||||
{
|
||||
qcrypto_der_encode_cons_end(ctx);
|
||||
}
|
||||
|
||||
void qcrypto_der_encode_oid(QCryptoEncodeContext *ctx,
|
||||
const uint8_t *src, size_t src_len)
|
||||
{
|
||||
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
|
||||
QCRYPTO_DER_TAG_ENC_PRIM,
|
||||
QCRYPTO_DER_TYPE_TAG_OID);
|
||||
qcrypto_der_encode_prim(ctx, tag, src, src_len);
|
||||
}
|
||||
|
||||
void qcrypto_der_encode_int(QCryptoEncodeContext *ctx,
|
||||
const uint8_t *src, size_t src_len)
|
||||
{
|
||||
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
|
||||
QCRYPTO_DER_TAG_ENC_PRIM,
|
||||
QCRYPTO_DER_TYPE_TAG_INT);
|
||||
qcrypto_der_encode_prim(ctx, tag, src, src_len);
|
||||
}
|
||||
|
||||
void qcrypto_der_encode_null(QCryptoEncodeContext *ctx)
|
||||
{
|
||||
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
|
||||
QCRYPTO_DER_TAG_ENC_PRIM,
|
||||
QCRYPTO_DER_TYPE_TAG_NULL);
|
||||
qcrypto_der_encode_prim(ctx, tag, NULL, 0);
|
||||
}
|
||||
|
||||
void qcrypto_der_encode_octet_str(QCryptoEncodeContext *ctx,
|
||||
const uint8_t *src, size_t src_len)
|
||||
{
|
||||
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
|
||||
QCRYPTO_DER_TAG_ENC_PRIM,
|
||||
QCRYPTO_DER_TYPE_TAG_OCT_STR);
|
||||
qcrypto_der_encode_prim(ctx, tag, src, src_len);
|
||||
}
|
||||
|
||||
void qcrypto_der_encode_octet_str_begin(QCryptoEncodeContext *ctx)
|
||||
{
|
||||
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
|
||||
QCRYPTO_DER_TAG_ENC_PRIM,
|
||||
QCRYPTO_DER_TYPE_TAG_OCT_STR);
|
||||
qcrypto_der_encode_cons_begin(ctx, tag);
|
||||
}
|
||||
|
||||
void qcrypto_der_encode_octet_str_end(QCryptoEncodeContext *ctx)
|
||||
{
|
||||
qcrypto_der_encode_cons_end(ctx);
|
||||
}
|
||||
|
||||
size_t qcrypto_der_encode_ctx_buffer_len(QCryptoEncodeContext *ctx)
|
||||
{
|
||||
return ctx->root.dlen;
|
||||
}
|
||||
|
||||
void qcrypto_der_encode_ctx_flush_and_free(QCryptoEncodeContext *ctx,
                                           uint8_t *dst)
{
    QCryptoDerEncodeNode *node, *prev;
    size_t len;

    for (prev = &ctx->root;
         (node = prev->next) && (prev->next = node->next, 1);) {
        /* Tag */
        *dst++ = node->tag;

        /* Length */
        qcrypto_der_encode_length(node->dlen, dst, &len);
        dst += len;

        /* Value */
        if (node->data) {
            memcpy(dst, node->data, node->dlen);
            dst += node->dlen;
        }
        g_free(node);
    }
    g_free(ctx);
}
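Putting the new encoder together, a minimal usage sketch (illustration only, not part of the commit): build SEQUENCE { INTEGER 0, NULL } and flush it into a buffer sized by the context. The result is the seven bytes 30 05 02 01 00 05 00.

#include "qemu/osdep.h"
#include "crypto/der.h"

static uint8_t *encode_version_zero(size_t *dlen)
{
    QCryptoEncodeContext *ctx = qcrypto_der_encode_ctx_new();
    const uint8_t version = 0;
    uint8_t *buf;

    qcrypto_der_encode_seq_begin(ctx);
    qcrypto_der_encode_int(ctx, &version, sizeof(version));
    qcrypto_der_encode_null(ctx);
    qcrypto_der_encode_seq_end(ctx);

    *dlen = qcrypto_der_encode_ctx_buffer_len(ctx);
    buf = g_malloc(*dlen);
    qcrypto_der_encode_ctx_flush_and_free(ctx, buf);
    return buf;
}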
211
crypto/der.h
@ -22,6 +22,11 @@
|
||||
|
||||
#include "qapi/error.h"
|
||||
|
||||
typedef struct QCryptoEncodeContext QCryptoEncodeContext;
|
||||
|
||||
/* rsaEncryption: 1.2.840.113549.1.1.1 */
|
||||
#define QCRYPTO_OID_rsaEncryption "\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01"
|
||||
|
||||
/* Simple decoder used to parse DER encoded rsa keys. */
|
||||
|
||||
/**
|
||||
@ -47,14 +52,13 @@ typedef int (*QCryptoDERDecodeCb) (void *opaque, const uint8_t *value,
|
||||
* will be set to the rest length of data, if cb is not NULL, must
|
||||
* return 0 to make decode success, at last, the length of the data
|
||||
* part of the decoded INTEGER will be returned. Otherwise, -1 is
|
||||
* returned.
|
||||
* returned and the valued of *data and *dlen keep unchanged.
|
||||
*/
|
||||
int qcrypto_der_decode_int(const uint8_t **data,
|
||||
size_t *dlen,
|
||||
QCryptoDERDecodeCb cb,
|
||||
void *opaque,
|
||||
Error **errp);
|
||||
|
||||
/**
|
||||
* qcrypto_der_decode_seq:
|
||||
*
|
||||
@ -70,7 +74,7 @@ int qcrypto_der_decode_int(const uint8_t **data,
|
||||
* will be set to the rest length of data, if cb is not NULL, must
|
||||
* return 0 to make decode success, at last, the length of the data
|
||||
* part of the decoded SEQUENCE will be returned. Otherwise, -1 is
|
||||
* returned.
|
||||
* returned and the valued of *data and *dlen keep unchanged.
|
||||
*/
|
||||
int qcrypto_der_decode_seq(const uint8_t **data,
|
||||
size_t *dlen,
|
||||
@ -78,4 +82,205 @@ int qcrypto_der_decode_seq(const uint8_t **data,
|
||||
void *opaque,
|
||||
Error **errp);
|
||||
|
||||
/**
|
||||
* qcrypto_der_decode_oid:
|
||||
*
|
||||
* Decode OID from DER-encoded data, similar with der_decode_int.
|
||||
*
|
||||
* @data: pointer to address of input data
|
||||
* @dlen: pointer to length of input data
|
||||
* @cb: callback invoked when decode succeed, if cb equals NULL, no
|
||||
* callback will be invoked
|
||||
* @opaque: parameter passed to cb
|
||||
*
|
||||
* Returns: On success, *data points to rest data, and *dlen
|
||||
* will be set to the rest length of data, if cb is not NULL, must
|
||||
* return 0 to make decode success, at last, the length of the data
|
||||
* part of the decoded OID will be returned. Otherwise, -1 is
|
||||
* returned and the valued of *data and *dlen keep unchanged.
|
||||
*/
|
||||
int qcrypto_der_decode_oid(const uint8_t **data,
|
||||
size_t *dlen,
|
||||
QCryptoDERDecodeCb cb,
|
||||
void *opaque,
|
||||
Error **errp);
|
||||
|
||||
/**
|
||||
* qcrypto_der_decode_octet_str:
|
||||
*
|
||||
* Decode OCTET STRING from DER-encoded data, similar with der_decode_int.
|
||||
*
|
||||
* @data: pointer to address of input data
|
||||
* @dlen: pointer to length of input data
|
||||
* @cb: callback invoked when decode succeed, if cb equals NULL, no
|
||||
* callback will be invoked
|
||||
* @opaque: parameter passed to cb
|
||||
*
|
||||
* Returns: On success, *data points to rest data, and *dlen
|
||||
* will be set to the rest length of data, if cb is not NULL, must
|
||||
* return 0 to make decode success, at last, the length of the data
|
||||
* part of the decoded OCTET STRING will be returned. Otherwise, -1 is
|
||||
* returned and the valued of *data and *dlen keep unchanged.
|
||||
*/
|
||||
int qcrypto_der_decode_octet_str(const uint8_t **data,
|
||||
size_t *dlen,
|
||||
QCryptoDERDecodeCb cb,
|
||||
void *opaque,
|
||||
Error **errp);
|
||||
|
||||
/**
|
||||
* qcrypto_der_decode_bit_str:
|
||||
*
|
||||
* Decode BIT STRING from DER-encoded data, similar with der_decode_int.
|
||||
*
|
||||
* @data: pointer to address of input data
|
||||
* @dlen: pointer to length of input data
|
||||
* @cb: callback invoked when decode succeed, if cb equals NULL, no
|
||||
* callback will be invoked
|
||||
* @opaque: parameter passed to cb
|
||||
*
|
||||
* Returns: On success, *data points to rest data, and *dlen
|
||||
* will be set to the rest length of data, if cb is not NULL, must
|
||||
* return 0 to make decode success, at last, the length of the data
|
||||
* part of the decoded BIT STRING will be returned. Otherwise, -1 is
|
||||
* returned and the valued of *data and *dlen keep unchanged.
|
||||
*/
|
||||
int qcrypto_der_decode_bit_str(const uint8_t **data,
|
||||
size_t *dlen,
|
||||
QCryptoDERDecodeCb cb,
|
||||
void *opaque,
|
||||
Error **errp);
|
||||
|
||||
|
||||
/**
|
||||
* qcrypto_der_decode_ctx_tag:
|
||||
*
|
||||
* Decode context specific tag
|
||||
*
|
||||
* @data: pointer to address of input data
|
||||
* @dlen: pointer to length of input data
|
||||
* @tag: expected value of context specific tag
|
||||
* @cb: callback invoked when decode succeed, if cb equals NULL, no
|
||||
* callback will be invoked
|
||||
* @opaque: parameter passed to cb
|
||||
*
|
||||
* Returns: On success, *data points to rest data, and *dlen
|
||||
* will be set to the rest length of data, if cb is not NULL, must
|
||||
* return 0 to make decode success, at last, the length of the data
|
||||
* part of the decoded BIT STRING will be returned. Otherwise, -1 is
|
||||
* returned and the valued of *data and *dlen keep unchanged.
|
||||
*/
|
||||
int qcrypto_der_decode_ctx_tag(const uint8_t **data,
|
||||
size_t *dlen, int tag_id,
|
||||
QCryptoDERDecodeCb cb,
|
||||
void *opaque,
|
||||
Error **errp);
|
||||
|
||||
/**
|
||||
* qcrypto_der_encode_ctx_new:
|
||||
*
|
||||
* Allocate a context used for der encoding.
|
||||
*/
|
||||
QCryptoEncodeContext *qcrypto_der_encode_ctx_new(void);
|
||||
|
||||
/**
|
||||
* qcrypto_der_encode_seq_begin:
|
||||
* @ctx: the encode context.
|
||||
*
|
||||
* Start encoding a SEQUENCE for ctx.
|
||||
*
|
||||
*/
|
||||
void qcrypto_der_encode_seq_begin(QCryptoEncodeContext *ctx);
|
||||
|
||||
/**
|
||||
* qcrypto_der_encode_seq_begin:
|
||||
* @ctx: the encode context.
|
||||
*
|
||||
* Finish uencoding a SEQUENCE for ctx.
|
||||
*
|
||||
*/
|
||||
void qcrypto_der_encode_seq_end(QCryptoEncodeContext *ctx);
|
||||
|
||||
|
||||
/**
|
||||
* qcrypto_der_encode_oid:
|
||||
* @ctx: the encode context.
|
||||
* @src: the source data of oid, note it should be already encoded, this
|
||||
* function only add tag and length part for it.
|
||||
*
|
||||
* Encode an oid into ctx.
|
||||
*/
|
||||
void qcrypto_der_encode_oid(QCryptoEncodeContext *ctx,
|
||||
const uint8_t *src, size_t src_len);
|
||||
|
||||
/**
|
||||
* qcrypto_der_encode_int:
|
||||
* @ctx: the encode context.
|
||||
* @src: the source data of integer, note it should be already encoded, this
|
||||
* function only add tag and length part for it.
|
||||
*
|
||||
* Encode an integer into ctx.
|
||||
*/
|
||||
void qcrypto_der_encode_int(QCryptoEncodeContext *ctx,
|
||||
const uint8_t *src, size_t src_len);
|
||||
|
||||
/**
|
||||
* qcrypto_der_encode_null:
|
||||
* @ctx: the encode context.
|
||||
*
|
||||
* Encode a null into ctx.
|
||||
*/
|
||||
void qcrypto_der_encode_null(QCryptoEncodeContext *ctx);
|
||||
|
||||
/**
|
||||
* qcrypto_der_encode_octet_str:
|
||||
* @ctx: the encode context.
|
||||
* @src: the source data of the octet string.
|
||||
*
|
||||
* Encode a octet string into ctx.
|
||||
*/
|
||||
void qcrypto_der_encode_octet_str(QCryptoEncodeContext *ctx,
|
||||
const uint8_t *src, size_t src_len);
|
||||
|
||||
/**
|
||||
* qcrypto_der_encode_octet_str_begin:
|
||||
* @ctx: the encode context.
|
||||
*
|
||||
* Start encoding a octet string, All fields between
|
||||
* qcrypto_der_encode_octet_str_begin and qcrypto_der_encode_octet_str_end
|
||||
* are encoded as an octet string. This is useful when we need to encode a
|
||||
* encoded SEQUNCE as OCTET STRING.
|
||||
*/
|
||||
void qcrypto_der_encode_octet_str_begin(QCryptoEncodeContext *ctx);
|
||||
|
||||
/**
|
||||
* qcrypto_der_encode_octet_str_end:
|
||||
* @ctx: the encode context.
|
||||
*
|
||||
* Finish encoding a octet string, All fields between
|
||||
* qcrypto_der_encode_octet_str_begin and qcrypto_der_encode_octet_str_end
|
||||
* are encoded as an octet string. This is useful when we need to encode a
|
||||
* encoded SEQUNCE as OCTET STRING.
|
||||
*/
|
||||
void qcrypto_der_encode_octet_str_end(QCryptoEncodeContext *ctx);
|
||||
|
||||
/**
|
||||
* qcrypto_der_encode_ctx_buffer_len:
|
||||
* @ctx: the encode context.
|
||||
*
|
||||
* Compute the expected buffer size to save all encoded things.
|
||||
*/
|
||||
size_t qcrypto_der_encode_ctx_buffer_len(QCryptoEncodeContext *ctx);
|
||||
|
||||
/**
|
||||
* qcrypto_der_encode_ctx_flush_and_free:
|
||||
* @ctx: the encode context.
|
||||
* @dst: the distination to save the encoded data, the length of dst should
|
||||
* not less than qcrypto_der_encode_cxt_buffer_len
|
||||
*
|
||||
* Flush all encoded data into dst, then free ctx.
|
||||
*/
|
||||
void qcrypto_der_encode_ctx_flush_and_free(QCryptoEncodeContext *ctx,
                                           uint8_t *dst);

#endif /* QCRYPTO_ASN1_DECODER_H */
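On the decoder side, a sketch of walking SEQUENCE { INTEGER } with the entry points documented above (illustration only; the callback signature is assumed from the QCryptoDERDecodeCb typedef shown earlier in this header):

typedef struct IntCapture {
    const uint8_t *value;
    size_t vlen;
} IntCapture;

static int capture_int(void *opaque, const uint8_t *value, size_t vlen,
                       Error **errp)
{
    IntCapture *cap = opaque;

    cap->value = value;
    cap->vlen = vlen;
    return 0;                   /* non-zero would fail the decode */
}

static int seq_body(void *opaque, const uint8_t *value, size_t vlen,
                    Error **errp)
{
    /* value/vlen now cover the SEQUENCE body; decode its first field */
    return qcrypto_der_decode_int(&value, &vlen, capture_int,
                                  opaque, errp) < 0 ? -1 : 0;
}

static int parse_wrapped_int(const uint8_t *data, size_t dlen,
                             IntCapture *cap, Error **errp)
{
    return qcrypto_der_decode_seq(&data, &dlen, seq_body,
                                  cap, errp) < 0 ? -1 : 0;
}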
@ -24,6 +24,11 @@
|
||||
#ifndef _WIN32
|
||||
#include <sys/resource.h>
|
||||
#endif
|
||||
#ifdef CONFIG_DARWIN
|
||||
#include <mach/mach_init.h>
|
||||
#include <mach/thread_act.h>
|
||||
#include <mach/mach_port.h>
|
||||
#endif
|
||||
|
||||
|
||||
static int qcrypto_pbkdf2_get_thread_cpu(unsigned long long *val_ms,
|
||||
@ -45,6 +50,24 @@ static int qcrypto_pbkdf2_get_thread_cpu(unsigned long long *val_ms,
|
||||
/* QuadPart is units of 100ns and we want ms as unit */
|
||||
*val_ms = thread_time.QuadPart / 10000ll;
|
||||
return 0;
|
||||
#elif defined(CONFIG_DARWIN)
|
||||
mach_port_t thread;
|
||||
kern_return_t kr;
|
||||
mach_msg_type_number_t count;
|
||||
thread_basic_info_data_t info;
|
||||
|
||||
thread = mach_thread_self();
|
||||
count = THREAD_BASIC_INFO_COUNT;
|
||||
kr = thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count);
|
||||
mach_port_deallocate(mach_task_self(), thread);
|
||||
if (kr != KERN_SUCCESS || (info.flags & TH_FLAGS_IDLE) != 0) {
|
||||
error_setg_errno(errp, errno, "Unable to get thread CPU usage");
|
||||
return -1;
|
||||
}
|
||||
|
||||
*val_ms = ((info.user_time.seconds * 1000ll) +
|
||||
(info.user_time.microseconds / 1000));
|
||||
return 0;
|
||||
#elif defined(RUSAGE_THREAD)
|
||||
struct rusage ru;
|
||||
if (getrusage(RUSAGE_THREAD, &ru) < 0) {
|
||||
|
@ -19,6 +19,8 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "der.h"
|
||||
#include "rsakey.h"
|
||||
|
||||
void qcrypto_akcipher_rsakey_free(QCryptoAkCipherRSAKey *rsa_key)
|
||||
@ -37,6 +39,46 @@ void qcrypto_akcipher_rsakey_free(QCryptoAkCipherRSAKey *rsa_key)
|
||||
g_free(rsa_key);
|
||||
}
|
||||
|
||||
/**
|
||||
* PKCS#8 private key info for RSA
|
||||
*
|
||||
* PrivateKeyInfo ::= SEQUENCE {
|
||||
* version INTEGER,
|
||||
* privateKeyAlgorithm PrivateKeyAlgorithmIdentifier,
|
||||
* privateKey OCTET STRING,
|
||||
* attributes [0] IMPLICIT Attributes OPTIONAL
|
||||
* }
|
||||
*/
|
||||
void qcrypto_akcipher_rsakey_export_p8info(const uint8_t *key,
|
||||
size_t keylen,
|
||||
uint8_t **dst,
|
||||
size_t *dlen)
|
||||
{
|
||||
QCryptoEncodeContext *ctx = qcrypto_der_encode_ctx_new();
|
||||
uint8_t version = 0;
|
||||
|
||||
qcrypto_der_encode_seq_begin(ctx);
|
||||
|
||||
/* version */
|
||||
qcrypto_der_encode_int(ctx, &version, sizeof(version));
|
||||
|
||||
/* algorithm identifier */
|
||||
qcrypto_der_encode_seq_begin(ctx);
|
||||
qcrypto_der_encode_oid(ctx, (uint8_t *)QCRYPTO_OID_rsaEncryption,
|
||||
sizeof(QCRYPTO_OID_rsaEncryption) - 1);
|
||||
qcrypto_der_encode_null(ctx);
|
||||
qcrypto_der_encode_seq_end(ctx);
|
||||
|
||||
/* RSA private key */
|
||||
qcrypto_der_encode_octet_str(ctx, key, keylen);
|
||||
|
||||
qcrypto_der_encode_seq_end(ctx);
|
||||
|
||||
*dlen = qcrypto_der_encode_ctx_buffer_len(ctx);
|
||||
*dst = g_malloc(*dlen);
|
||||
qcrypto_der_encode_ctx_flush_and_free(ctx, *dst);
|
||||
}
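For orientation, the DER produced by the export function above has this rough shape (generic PKCS#8 layout; the actual length bytes depend on the key size):

/*
 * 30 <len>                              PrivateKeyInfo SEQUENCE
 *    02 01 00                           version INTEGER 0
 *    30 0d                              privateKeyAlgorithm SEQUENCE
 *       06 09 2a 86 48 86 f7 0d 01 01 01   OID 1.2.840.113549.1.1.1 (rsaEncryption)
 *       05 00                           NULL parameters
 *    04 <len> <key bytes>               privateKey OCTET STRING (the PKCS#1 blob)
 */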
#if defined(CONFIG_NETTLE) && defined(CONFIG_HOGWEED)
|
||||
#include "rsakey-nettle.c.inc"
|
||||
#else
|
||||
|
@ -22,7 +22,6 @@
|
||||
#ifndef QCRYPTO_RSAKEY_H
|
||||
#define QCRYPTO_RSAKEY_H
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/host-utils.h"
|
||||
#include "crypto/akcipher.h"
|
||||
|
||||
@ -84,6 +83,16 @@ QCryptoAkCipherRSAKey *qcrypto_akcipher_rsakey_parse(
|
||||
QCryptoAkCipherKeyType type,
|
||||
const uint8_t *key, size_t keylen, Error **errp);
|
||||
|
||||
/**
|
||||
* qcrypto_akcipher_rsakey_export_as_p8info:
|
||||
*
|
||||
* Export RSA private key to PKCS#8 private key info.
|
||||
*/
|
||||
void qcrypto_akcipher_rsakey_export_p8info(const uint8_t *key,
|
||||
size_t keylen,
|
||||
uint8_t **dst,
|
||||
size_t *dlen);
|
||||
|
||||
void qcrypto_akcipher_rsakey_free(QCryptoAkCipherRSAKey *key);
|
||||
|
||||
G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoAkCipherRSAKey,
Some files were not shown because too many files have changed in this diff.